author     Linus Torvalds <torvalds@linux-foundation.org>  2018-08-15 17:39:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-15 17:39:07 -0700
commit     54dbe75bbf1e189982516de179147208e90b5e45
tree       523ba6dd21d2f9257b73d95d289095b116da0f75 /drivers/gpu/drm/i915
parent     dafa5f6577a9eecd2941add553d1672c30b02364
parent     557ce95051c8eff67af48612ab350d8408aa0541
Merge tag 'drm-next-2018-08-15' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for 4.19. Rob has some new hardware support for new qualcomm hw that I'll send along separately. This has the display part of it; the remaining pull is for the acceleration engine. This also contains a wound-wait/wait-die mutex rework, Peter has acked it for merging via my tree. Otherwise mostly the usual level of activity.

  Summary:

  core:
   - Wound-wait/wait-die mutex rework
   - Add writeback connector type
   - Add "content type" property for HDMI
   - Move GEM bo to drm_framebuffer
   - Initial gpu scheduler documentation
   - GPU scheduler fixes for dying processes
   - Console deferred fbcon takeover support
   - Displayport support for CEC tunneling over AUX

  panel:
   - otm8009a panel driver fixes
   - Innolux TV123WAM and G070Y2-L01 panel driver
   - Ilitek ILI9881c panel driver
   - Rocktech RK070ER9427 LCD
   - EDT ETM0700G0EDH6 and EDT ETM0700G0BDH6
   - DLC DLC0700YZG-1
   - BOE HV070WSA-100
   - newhaven, nhd-4.3-480272ef-atxl LCD
   - DataImage SCF0700C48GGU18
   - Sharp LQ035Q7DB03
   - p079zca: Refactor to support multiple panels

  tinydrm:
   - ILI9341 display panel

  New driver:
   - vkms - virtual kms driver for testing

  i915:
   - Icelake:
      - Display enablement
      - DSI support
      - IRQ support
      - Powerwell support
   - GPU reset fixes and improvements
   - Full ppgtt support refactoring
   - PSR fixes and improvements
   - Execlist improvements
   - GuC related fixes

  amdgpu:
   - Initial amdgpu documentation
   - JPEG engine support on VCN
   - CIK uses powerplay by default
   - Move to using core PCIE functionality for gens/lanes
   - DC/Powerplay interface rework
   - Stutter mode support for RV
   - Vega12 Powerplay updates
   - GFXOFF fixes
   - GPUVM fault debugging
   - Vega12 GFXOFF
   - DC improvements
   - DC i2c/aux changes
   - UVD 7.2 fixes
   - Powerplay fixes for Polaris12, CZ/ST
   - command submission bo_list fixes

  amdkfd:
   - Raven support
   - Power management fixes

  udl:
   - Cleanups and fixes

  nouveau:
   - misc fixes and cleanups

  msm:
   - DPU1 display controller support in sdm845
   - GPU coredump support

  vmwgfx:
   - Atomic modesetting validation fixes
   - Support for multisample surfaces

  armada:
   - Atomic modesetting support completed

  exynos:
   - IPPv2 fixes
   - Move g2d to component framework
   - Suspend/resume support cleanups
   - Driver cleanups

  imx:
   - CSI configuration improvements
   - Driver cleanups
   - Use atomic suspend/resume helpers
   - ipu-v3 V4L2 XRGB32/XBGR32 support

  pl111:
   - Add Nomadik LCDC variant

  v3d:
   - GPU scheduler jobs management

  sun4i:
   - R40 display engine support
   - TCON TOP driver

  mediatek:
   - MT2712 SoC support

  rockchip:
   - vop fixes

  omapdrm:
   - Workaround for DRA7 errata i932
   - Fix mm_list locking

  mali-dp:
   - Writeback implementation
   - PM improvements
   - Internal error reporting debugfs

  tilcdc:
   - Single fix for deferred probing

  hdlcd:
   - Teardown fixes

  tda998x:
   - Converted to a bridge driver
  etnaviv:
   - Misc fixes"

* tag 'drm-next-2018-08-15' of git://anongit.freedesktop.org/drm/drm: (1506 commits)
  drm/amdgpu/sriov: give 8s for recover vram under RUNTIME
  drm/scheduler: fix param documentation
  drm/i2c: tda998x: correct PLL divider calculation
  drm/i2c: tda998x: get rid of private fill_modes function
  drm/i2c: tda998x: move mode_valid() to bridge
  drm/i2c: tda998x: register bridge outside of component helper
  drm/i2c: tda998x: cleanup from previous changes
  drm/i2c: tda998x: allocate tda998x_priv inside tda998x_create()
  drm/i2c: tda998x: convert to bridge driver
  drm/scheduler: fix timeout worker setup for out of order job completions
  drm/amd/display: display connected to dp-1 does not light up
  drm/amd/display: update clk for various HDMI color depths
  drm/amd/display: program display clock on cache match
  drm/amd/display: Add NULL check for enabling dp ss
  drm/amd/display: add vbios table check for enabling dp ss
  drm/amd/display: Don't share clk source between DP and HDMI
  drm/amd/display: Fix DP HBR2 Eye Diagram Pattern on Carrizo
  drm/amd/display: Use calculated disp_clk_khz value for dce110
  drm/amd/display: Implement custom degamma lut on dcn
  drm/amd/display: Destroy aux_engines only once
  ...
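The wound-wait/wait-die mutex rework called out in the summary centers on the kernel's ww_mutex API. For orientation, a minimal sketch of the acquire/backoff pattern that API is built around, assuming the stock <linux/ww_mutex.h> interface; the demo_* names are illustrative and do not appear in the patches below:

#include <linux/kernel.h>	/* swap() */
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

/*
 * Take two ww_mutexes in one acquire context. -EDEADLK means this
 * context lost the wound-wait/wait-die tie-break: drop what we hold,
 * sleep on the contended lock with ww_mutex_lock_slow(), then retry.
 */
static void demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &demo_ww_class);
	ww_mutex_lock(a, &ctx);	/* first lock of a fresh ctx cannot deadlock */
retry:
	if (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);	/* may sleep, always succeeds */
		swap(a, b);	/* 'a' now names the held lock; take the other */
		goto retry;
	}
	ww_acquire_done(&ctx);

	/* ... both locks held ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}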
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 12
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 20
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 22
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 26
-rw-r--r--  drivers/gpu/drm/i915/dvo_ns2501.c | 44
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 43
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 62
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 434
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 20
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 438
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 130
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/page_track.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 208
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 57
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 374
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 177
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 127
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 593
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 174
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 115
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 1282
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 85
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 92
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 648
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 144
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 67
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4088
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 100
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_selftest.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 148
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 385
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 53
-rw-r--r--  drivers/gpu/drm/i915/icl_dsi.c | 127
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 48
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 129
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 178
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 782
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 531
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 205
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 288
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 129
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.c | 160
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.h | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c | 138
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 140
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 84
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 979
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 204
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c | 445
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 631
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 405
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 139
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 333
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 311
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 165
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 167
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 82
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 47
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 301
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 39
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 174
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c | 62
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 34
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 33
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c | 55
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_wedge_me.h | 58
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 260
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c | 133
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c | 24
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c | 7
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_dmabuf.c | 14
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 49
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 20
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 65
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi.c (renamed from drivers/gpu/drm/i915/intel_dsi.c) | 117
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi_pll.c (renamed from drivers/gpu/drm/i915/intel_dsi_pll.c) | 98
160 files changed, 12182 insertions, 7989 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 9de8b1c51a5c..459f8f88a34c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -51,6 +51,18 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_ERRLOG_GEM
+ bool "Insert extra logging (very verbose) for common GEM errors"
+ default n
+ depends on DRM_I915_DEBUG_GEM
+ help
+ Enable additional logging that may help track down the cause of
+ principally userspace errors.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_TRACE_GEM
bool "Insert extra ftrace output from the GEM internals"
depends on DRM_I915_DEBUG_GEM
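The DRM_I915_ERRLOG_GEM knob added above is an ordinary bool Kconfig symbol gated on DRM_I915_DEBUG_GEM. For context, a hedged sketch of how such a symbol is typically consumed in driver code; the macro name below is made up, and the real wiring is not part of this hunk:

#include <linux/kconfig.h>
#include <linux/printk.h>

/* Compiles to nothing unless CONFIG_DRM_I915_ERRLOG_GEM=y. */
#define DEMO_GEM_ERRLOG(fmt, ...) do {				\
	if (IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM))		\
		pr_err("gem: " fmt, ##__VA_ARGS__);		\
} while (0)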
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 4c6adae23e18..5794f102f9b8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -135,15 +135,14 @@ i915-y += dvo_ch7017.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
+ icl_dsi.o \
intel_crt.o \
intel_ddi.o \
intel_dp_aux_backlight.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
- intel_dsi.o \
intel_dsi_dcs_backlight.o \
- intel_dsi_pll.o \
intel_dsi_vbt.o \
intel_dvo.o \
intel_hdmi.o \
@@ -152,7 +151,9 @@ i915-y += dvo_ch7017.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
- intel_tv.o
+ intel_tv.o \
+ vlv_dsi.o \
+ vlv_dsi_pll.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 80b3e16cf48c..caac9942e1e3 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,7 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t dummy;
+ u8 dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -186,7 +186,7 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
- uint8_t buf[2] = { addr, val };
+ u8 buf[2] = { addr, val };
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -258,11 +258,11 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
- uint8_t outputs_enable, lvds_control_2, lvds_power_down;
- uint8_t horizontal_active_pixel_input;
- uint8_t horizontal_active_pixel_output, vertical_active_line_output;
- uint8_t active_input_line_output;
+ u8 lvds_pll_feedback_div, lvds_pll_vco_control;
+ u8 outputs_enable, lvds_control_2, lvds_power_down;
+ u8 horizontal_active_pixel_input;
+ u8 horizontal_active_pixel_output, vertical_active_line_output;
+ u8 active_input_line_output;
DRM_DEBUG_KMS("Registers before mode setting\n");
ch7017_dump_regs(dvo);
@@ -333,7 +333,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
/* set the CH7017 power state */
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
- uint8_t val;
+ u8 val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
@@ -361,7 +361,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
@@ -373,7 +373,7 @@ static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
#define DUMP(reg) \
do { \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 7aeeffd2428b..397ac5233726 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -85,7 +85,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
static struct ch7xxx_id_struct {
- uint8_t vid;
+ u8 vid;
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
@@ -96,7 +96,7 @@ static struct ch7xxx_id_struct {
};
static struct ch7xxx_did_struct {
- uint8_t did;
+ u8 did;
char *name;
} ch7xxx_dids[] = {
{ CH7xxx_DID, "CH7XXX" },
@@ -107,7 +107,7 @@ struct ch7xxx_priv {
bool quiet;
};
-static char *ch7xxx_get_id(uint8_t vid)
+static char *ch7xxx_get_id(u8 vid)
{
int i;
@@ -119,7 +119,7 @@ static char *ch7xxx_get_id(uint8_t vid)
return NULL;
}
-static char *ch7xxx_get_did(uint8_t did)
+static char *ch7xxx_get_did(u8 did)
{
int i;
@@ -132,7 +132,7 @@ static char *ch7xxx_get_did(uint8_t did)
}
/** Reads an 8 bit register */
-static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -170,11 +170,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
}
/** Writes an 8 bit register */
-static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -201,7 +201,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
- uint8_t vendor, device;
+ u8 vendor, device;
char *name, *devid;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
@@ -244,7 +244,7 @@ out:
static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
{
- uint8_t cdet, orig_pm, pm;
+ u8 cdet, orig_pm, pm;
ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
@@ -276,7 +276,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- uint8_t tvco, tpcp, tpd, tlpf, idf;
+ u8 tvco, tpcp, tpd, tlpf, idf;
if (mode->clock <= 65000) {
tvco = 0x23;
@@ -336,7 +336,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
- uint8_t val;
+ u8 val;
if ((i % 8) == 0)
DRM_DEBUG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index c73aff163908..24278cc49090 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -161,7 +161,7 @@
* instead. The following list contains all registers that
* require saving.
*/
-static const uint16_t backup_addresses[] = {
+static const u16 backup_addresses[] = {
0x11, 0x12,
0x18, 0x19, 0x1a, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
@@ -174,11 +174,11 @@ static const uint16_t backup_addresses[] = {
struct ivch_priv {
bool quiet;
- uint16_t width, height;
+ u16 width, height;
/* Register backup */
- uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
+ u16 reg_backup[ARRAY_SIZE(backup_addresses)];
};
@@ -188,7 +188,7 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
*
* Each of the 256 registers are 16 bits long.
*/
-static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -231,7 +231,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
}
/* Writes a 16-bit register on the ivch */
-static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -263,7 +263,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
struct ivch_priv *priv;
- uint16_t temp;
+ u16 temp;
int i;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
@@ -342,7 +342,7 @@ static void ivch_reset(struct intel_dvo_device *dvo)
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
- uint16_t vr01, vr30, backlight;
+ u16 vr01, vr30, backlight;
ivch_reset(dvo);
@@ -379,7 +379,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
- uint16_t vr01;
+ u16 vr01;
ivch_reset(dvo);
@@ -398,9 +398,9 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *adjusted_mode)
{
struct ivch_priv *priv = dvo->dev_priv;
- uint16_t vr40 = 0;
- uint16_t vr01 = 0;
- uint16_t vr10;
+ u16 vr40 = 0;
+ u16 vr01 = 0;
+ u16 vr10;
ivch_reset(dvo);
@@ -416,7 +416,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
mode->vdisplay != adjusted_mode->crtc_vdisplay) {
- uint16_t x_ratio, y_ratio;
+ u16 x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
vr40 |= VR40_CLOCK_GATING_ENABLE;
@@ -438,7 +438,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
static void ivch_dump_regs(struct intel_dvo_device *dvo)
{
- uint16_t val;
+ u16 val;
ivch_read(dvo, VR00, &val);
DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 2379c33cfe51..c584e01dc8dc 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -191,8 +191,8 @@ enum {
};
struct ns2501_reg {
- uint8_t offset;
- uint8_t value;
+ u8 offset;
+ u8 value;
};
/*
@@ -202,23 +202,23 @@ struct ns2501_reg {
* read all this with a grain of salt.
*/
struct ns2501_configuration {
- uint8_t sync; /* configuration of the C0 register */
- uint8_t conf; /* configuration register 8 */
- uint8_t syncb; /* configuration register 41 */
- uint8_t dither; /* configuration of the dithering */
- uint8_t pll_a; /* PLL configuration, register A, 1B */
- uint16_t pll_b; /* PLL configuration, register B, 1C/1D */
- uint16_t hstart; /* horizontal start, registers C1/C2 */
- uint16_t hstop; /* horizontal total, registers C3/C4 */
- uint16_t vstart; /* vertical start, registers C5/C6 */
- uint16_t vstop; /* vertical total, registers C7/C8 */
- uint16_t vsync; /* manual vertical sync start, 80/81 */
- uint16_t vtotal; /* number of lines generated, 82/83 */
- uint16_t hpos; /* horizontal position + 256, 98/99 */
- uint16_t vpos; /* vertical position, 8e/8f */
- uint16_t voffs; /* vertical output offset, 9c/9d */
- uint16_t hscale; /* horizontal scaling factor, b8/b9 */
- uint16_t vscale; /* vertical scaling factor, 10/11 */
+ u8 sync; /* configuration of the C0 register */
+ u8 conf; /* configuration register 8 */
+ u8 syncb; /* configuration register 41 */
+ u8 dither; /* configuration of the dithering */
+ u8 pll_a; /* PLL configuration, register A, 1B */
+ u16 pll_b; /* PLL configuration, register B, 1C/1D */
+ u16 hstart; /* horizontal start, registers C1/C2 */
+ u16 hstop; /* horizontal total, registers C3/C4 */
+ u16 vstart; /* vertical start, registers C5/C6 */
+ u16 vstop; /* vertical total, registers C7/C8 */
+ u16 vsync; /* manual vertical sync start, 80/81 */
+ u16 vtotal; /* number of lines generated, 82/83 */
+ u16 hpos; /* horizontal position + 256, 98/99 */
+ u16 vpos; /* vertical position, 8e/8f */
+ u16 voffs; /* vertical output offset, 9c/9d */
+ u16 hscale; /* horizontal scaling factor, b8/b9 */
+ u16 vscale; /* vertical scaling factor, 10/11 */
};
/*
@@ -389,7 +389,7 @@ struct ns2501_priv {
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
-static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -434,11 +434,11 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
-static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 1c1a0674dbab..4ae5d8fd9ff0 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -65,7 +65,7 @@ struct sil164_priv {
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
-static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -102,11 +102,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
return false;
}
-static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -173,7 +173,7 @@ out:
static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
{
- uint8_t reg9;
+ u8 reg9;
sil164_readb(dvo, SIL164_REG9, &reg9);
@@ -243,7 +243,7 @@ static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val;
+ u8 val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 31e181da93db..d603bc2f2506 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -90,7 +90,7 @@ struct tfp410_priv {
bool quiet;
};
-static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -127,11 +127,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
return false;
}
-static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- uint8_t out_buf[2];
+ u8 out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
@@ -155,7 +155,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
{
- uint8_t ch1, ch2;
+ u8 ch1, ch2;
if (tfp410_readb(dvo, addr+0, &ch1) &&
tfp410_readb(dvo, addr+1, &ch2))
@@ -203,7 +203,7 @@ out:
static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
{
enum drm_connector_status ret = connector_status_disconnected;
- uint8_t ctl2;
+ u8 ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
if (ctl2 & TFP410_CTL_2_RSEN)
@@ -236,7 +236,7 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
/* set the tfp410 power state */
static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
- uint8_t ctl1;
+ u8 ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
@@ -251,7 +251,7 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
{
- uint8_t ctl1;
+ u8 ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return false;
@@ -264,7 +264,7 @@ static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
- uint8_t val, val2;
+ u8 val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7c9ec4f4f36c..380eeb2a0e83 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+ ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7f562410f9cf..45e89b1e0481 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -172,6 +172,7 @@ struct decode_info {
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+#define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5)
#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
@@ -1279,7 +1280,9 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1307,7 +1310,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1331,7 +1336,9 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv))
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1340,26 +1347,14 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
static int check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv)
- || IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv))
- return gen8_check_mi_display_flip(s, info);
- return -ENODEV;
+ return gen8_check_mi_display_flip(s, info);
}
static int update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv)
- || IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv))
- return gen8_update_plane_mmio_from_mi_display_flip(s, info);
- return -ENODEV;
+ return gen8_update_plane_mmio_from_mi_display_flip(s, info);
}
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
@@ -1638,15 +1633,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
*/
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
- struct intel_gvt *gvt = s->vgpu->gvt;
-
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- /* BDW decides privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
+ /* Decide privilege based on address space */
+ if (cmd_val(s, 0) & (1 << 8) &&
!(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
- return 0;
- }
+ return 0;
return 1;
}
@@ -2372,6 +2362,9 @@ static struct cmd_info cmd_info[] = {
{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
0, 16, NULL},
+ {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 4b072ade8c38..3019dbc39aef 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -171,6 +171,29 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
+ if (IS_BROXTON(dev_priv)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
+ BXT_DE_PORT_HP_DDIB |
+ BXT_DE_PORT_HP_DDIC);
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIA;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIB;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+ BXT_DE_PORT_HP_DDIC;
+ }
+
+ return;
+ }
+
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
@@ -273,8 +296,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
for_each_pipe(dev_priv, pipe) {
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
- vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
@@ -337,26 +360,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
int pipe, id;
+ int found = false;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- if (pipe_is_enabled(vgpu, pipe))
- goto out;
+ if (pipe_is_enabled(vgpu, pipe)) {
+ found = true;
+ break;
+ }
}
+ if (found)
+ break;
}
/* all the pipes are disabled */
- hrtimer_cancel(&irq->vblank_timer.timer);
- return;
-
-out:
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
-
+ if (!found)
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ else
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+ mutex_unlock(&gvt->lock);
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@ -393,8 +418,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
{
int pipe;
+ mutex_lock(&vgpu->vgpu_lock);
for_each_pipe(vgpu->gvt->dev_priv, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -409,11 +436,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
struct intel_vgpu *vgpu;
int id;
- if (WARN_ON(!mutex_is_locked(&gvt->lock)))
- return;
-
+ mutex_lock(&gvt->lock);
for_each_active_vgpu(gvt, vgpu, id)
emulate_vblank(vgpu);
+ mutex_unlock(&gvt->lock);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6f4f8e941fc2..6e3f56684f4e 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -164,7 +164,9 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
@@ -192,6 +194,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
return obj;
}
+static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
+{
+ if (c && c->x_hot <= c->width && c->y_hot <= c->height)
+ return true;
+ else
+ return false;
+}
+
static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu *vgpu,
struct intel_vgpu_fb_info *info,
@@ -229,12 +239,14 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->x_pos = c.x_pos;
info->y_pos = c.y_pos;
- /* The invalid cursor hotspot value is delivered to host
- * until we find a way to get the cursor hotspot info of
- * guest OS.
- */
- info->x_hot = UINT_MAX;
- info->y_hot = UINT_MAX;
+ if (validate_hotspot(&c)) {
+ info->x_hot = c.x_hot;
+ info->y_hot = c.y_hot;
+ } else {
+ info->x_hot = UINT_MAX;
+ info->y_hot = UINT_MAX;
+ }
+
info->size = (((info->stride * c.height * c.bpp) / 8)
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
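A note on the hotspot change above: UINT_MAX remains the "no valid hotspot" sentinel delivered to the host when validation fails, so a consumer can test for it directly. A hypothetical helper (not part of the patch) that makes the contract explicit:

#include <linux/kernel.h>	/* UINT_MAX */

/* True when the guest actually delivered a usable cursor hotspot. */
static inline bool demo_hotspot_valid(u32 x_hot, u32 y_hot)
{
	return x_hot != UINT_MAX && y_hot != UINT_MAX;
}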
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index f61337632969..4b98539025c5 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -77,6 +77,20 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
return chr;
}
+static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == 1)
+ port = PORT_B;
+ else if (port_select == 2)
+ port = PORT_C;
+ else if (port_select == 3)
+ port = PORT_D;
+ return port;
+}
+
static inline int get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
@@ -105,6 +119,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -116,7 +131,10 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (pin_select == 0)
return 0;
- port = get_port_from_gmbus0(pin_select);
+ if (IS_BROXTON(dev_priv))
+ port = bxt_get_port_from_gmbus0(pin_select);
+ else
+ port = get_port_from_gmbus0(pin_select);
if (WARN_ON(port < 0))
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 427e40e64d41..714d709829a2 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -146,14 +146,11 @@ struct execlist_ring_context {
u32 nop4;
u32 lri_cmd_2;
struct execlist_mmio_pair ctx_timestamp;
- struct execlist_mmio_pair pdp3_UDW;
- struct execlist_mmio_pair pdp3_LDW;
- struct execlist_mmio_pair pdp2_UDW;
- struct execlist_mmio_pair pdp2_LDW;
- struct execlist_mmio_pair pdp1_UDW;
- struct execlist_mmio_pair pdp1_LDW;
- struct execlist_mmio_pair pdp0_UDW;
- struct execlist_mmio_pair pdp0_LDW;
+ /*
+ * pdps[8]={ pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW,
+ * pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW}
+ */
+ struct execlist_mmio_pair pdps[8];
};
struct intel_vgpu_elsp_dwords {
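The pdps[] flattening above preserves the exact layout of the eight named pairs it replaces, ordered high to low. A hypothetical accessor (not part of the patch) makes the indexing concrete, assuming execlist_mmio_pair is the { addr, val } register/value pair used elsewhere in gvt:

/*
 * Read PDP n (0..3) as one 64-bit value from the flattened array:
 * pdps[0]=pdp3_UDW, pdps[1]=pdp3_LDW, ..., pdps[6]=pdp0_UDW, pdps[7]=pdp0_LDW.
 */
static inline u64 demo_ring_context_pdp(const struct execlist_ring_context *rc,
					int n)
{
	const struct execlist_mmio_pair *udw = &rc->pdps[2 * (3 - n)];

	return ((u64)udw[0].val << 32) | udw[1].val;
}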
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 1c120683e958..face664be3e8 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -36,6 +36,7 @@
#include <uapi/drm/drm_fourcc.h>
#include "i915_drv.h"
#include "gvt.h"
+#include "i915_pvinfo.h"
#define PRIMARY_FORMAT_NUM 16
struct pixel_format {
@@ -150,7 +151,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
@@ -214,7 +217,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (!plane->enabled)
return -ENODEV;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) {
plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
_PLANE_CTL_TILED_SHIFT;
fmt = skl_format_to_drm(
@@ -256,7 +261,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
- (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+ (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv)) ?
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
@@ -300,16 +307,16 @@ static int cursor_mode_to_drm(int mode)
int cursor_pixel_formats_index = 4;
switch (mode) {
- case CURSOR_MODE_128_ARGB_AX:
+ case MCURSOR_MODE_128_ARGB_AX:
cursor_pixel_formats_index = 0;
break;
- case CURSOR_MODE_256_ARGB_AX:
+ case MCURSOR_MODE_256_ARGB_AX:
cursor_pixel_formats_index = 1;
break;
- case CURSOR_MODE_64_ARGB_AX:
+ case MCURSOR_MODE_64_ARGB_AX:
cursor_pixel_formats_index = 2;
break;
- case CURSOR_MODE_64_32B_AX:
+ case MCURSOR_MODE_64_32B_AX:
cursor_pixel_formats_index = 3;
break;
@@ -342,8 +349,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
return -ENODEV;
val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
- mode = val & CURSOR_MODE;
- plane->enabled = (mode != CURSOR_MODE_DISABLE);
+ mode = val & MCURSOR_MODE;
+ plane->enabled = (mode != MCURSOR_MODE_DISABLE);
if (!plane->enabled)
return -ENODEV;
@@ -384,6 +391,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+ plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
+ plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a73e1d418c22..4ac18b447247 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt,
h = (struct gvt_firmware_header *)fw->data;
- crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ crc32_start = offsetofend(struct gvt_firmware_header, crc32);
mem = fw->data + crc32_start;
#define VERIFY(s, a, b) do { \
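The offsetofend() conversion above drops a hard-coded "+ 4" that silently depended on crc32 being a u32. A minimal illustration with a demo struct, assuming the kernel's offsetofend() from <linux/stddef.h>:

#include <linux/stddef.h>
#include <linux/types.h>

struct demo_fw_header {
	u64 magic;
	u32 version;
	u32 crc32;	/* checksum covers everything after this field */
};

/*
 * offsetofend(T, m) == offsetof(T, m) + sizeof(member), so the start of
 * the CRC-covered region stays correct even if crc32 changes type.
 */
static const size_t demo_crc_start = offsetofend(struct demo_fw_header, crc32);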
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4efec8fa6c1d..00aad8164dec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -216,16 +216,22 @@ static struct gtt_type_table_entry gtt_type_table[] = {
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ /* We treat the IPS bit as 'PSE' at the PTE level. */
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
- GTT_TYPE_INVALID),
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
- GTT_TYPE_INVALID),
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
@@ -339,8 +345,14 @@ static inline int gtt_set_entry64(void *pt,
#define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
+#define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
+#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
+#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */
+
+#define GTT_64K_PTE_STRIDE 16
+
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
unsigned long pfn;
@@ -349,6 +361,8 @@ static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
+ else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
+ pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
else
pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
return pfn;
@@ -362,6 +376,9 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
e->val64 &= ~ADDR_2M_MASK;
pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
+ } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
+ e->val64 &= ~ADDR_64K_MASK;
+ pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
} else {
e->val64 &= ~ADDR_4K_MASK;
pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
@@ -372,16 +389,41 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
- /* Entry doesn't have PSE bit. */
- if (get_pse_type(e->type) == GTT_TYPE_INVALID)
- return false;
+ return !!(e->val64 & _PAGE_PSE);
+}
- e->type = get_entry_type(e->type);
- if (!(e->val64 & _PAGE_PSE))
+static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
+{
+ if (gen8_gtt_test_pse(e)) {
+ switch (e->type) {
+ case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ e->val64 &= ~_PAGE_PSE;
+ e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
+ break;
+ case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+ e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
+ e->val64 &= ~_PAGE_PSE;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
+}
+
+static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
+{
+ if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
return false;
- e->type = get_pse_type(e->type);
- return true;
+ return !!(e->val64 & GEN8_PDE_IPS_64K);
+}
+
+static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
+{
+ if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
+ return;
+
+ e->val64 &= ~GEN8_PDE_IPS_64K;
}
static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
@@ -408,6 +450,21 @@ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
e->val64 |= _PAGE_PRESENT;
}
+static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
+}
+
+static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
+}
+
+static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
+}
+
/*
* Per-platform GMA routines.
*/
@@ -440,6 +497,12 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
.set_present = gtt_entry_set_present,
.test_present = gen8_gtt_test_present,
.test_pse = gen8_gtt_test_pse,
+ .clear_pse = gen8_gtt_clear_pse,
+ .clear_ips = gen8_gtt_clear_ips,
+ .test_ips = gen8_gtt_test_ips,
+ .clear_64k_splited = gen8_gtt_clear_64k_splited,
+ .set_64k_splited = gen8_gtt_set_64k_splited,
+ .test_64k_splited = gen8_gtt_test_64k_splited,
.get_pfn = gen8_gtt_get_pfn,
.set_pfn = gen8_gtt_set_pfn,
};
@@ -453,6 +516,27 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
+/* Update entry type per pse and ips bit. */
+static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
+ struct intel_gvt_gtt_entry *entry, bool ips)
+{
+ switch (entry->type) {
+ case GTT_TYPE_PPGTT_PDE_ENTRY:
+ case GTT_TYPE_PPGTT_PDP_ENTRY:
+ if (pte_ops->test_pse(entry))
+ entry->type = get_pse_type(entry->type);
+ break;
+ case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+ if (ips)
+ entry->type = get_pse_type(entry->type);
+ break;
+ default:
+ GEM_BUG_ON(!gtt_type_is_entry(entry->type));
+ }
+
+ GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
+}
+
/*
* MM helpers.
*/
@@ -468,8 +552,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
mm->ppgtt_mm.shadow_pdps,
entry, index, false, 0, mm->vgpu);
-
- pte_ops->test_pse(entry);
+ update_entry_type_for_real(pte_ops, entry, false);
}
static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
@@ -574,7 +657,8 @@ static inline int ppgtt_spt_get_entry(
if (ret)
return ret;
- ops->test_pse(e);
+ update_entry_type_for_real(ops, e, guest ?
+ spt->guest_page.pde_ips : false);
gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
type, e->type, index, e->val64);
@@ -653,10 +737,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
- if (spt->guest_page.oos_page)
- detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+ if (spt->guest_page.gfn) {
+ if (spt->guest_page.oos_page)
+ detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
- intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+ intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+ }
list_del_init(&spt->post_shadow_list);
free_spt(spt);
@@ -717,8 +803,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
+/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
- struct intel_vgpu *vgpu, int type, unsigned long gfn)
+ struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -753,26 +840,12 @@ retry:
spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
- /*
- * Init guest_page.
- */
- spt->guest_page.type = type;
- spt->guest_page.gfn = gfn;
-
- ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
- ppgtt_write_protection_handler, spt);
- if (ret)
- goto err_unmap_dma;
-
ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
if (ret)
- goto err_unreg_page_track;
+ goto err_unmap_dma;
- trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
return spt;
-err_unreg_page_track:
- intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
err_unmap_dma:
dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
@@ -780,6 +853,37 @@ err_free_spt:
return ERR_PTR(ret);
}
+/* Allocate shadow page table associated with specific gfn. */
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
+ struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+ unsigned long gfn, bool guest_pde_ips)
+{
+ struct intel_vgpu_ppgtt_spt *spt;
+ int ret;
+
+ spt = ppgtt_alloc_spt(vgpu, type);
+ if (IS_ERR(spt))
+ return spt;
+
+ /*
+ * Init guest_page.
+ */
+ ret = intel_vgpu_register_page_track(vgpu, gfn,
+ ppgtt_write_protection_handler, spt);
+ if (ret) {
+ ppgtt_free_spt(spt);
+ return ERR_PTR(ret);
+ }
+
+ spt->guest_page.type = type;
+ spt->guest_page.gfn = gfn;
+ spt->guest_page.pde_ips = guest_pde_ips;
+
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+
+ return spt;
+}
+
#define pt_entry_size_shift(spt) \
((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
@@ -787,24 +891,38 @@ err_free_spt:
(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
- for (i = 0; i < pt_entries(spt); i++) \
+ for (i = 0; i < pt_entries(spt); \
+ i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
if (!ppgtt_get_guest_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
#define for_each_present_shadow_entry(spt, e, i) \
- for (i = 0; i < pt_entries(spt); i++) \
+ for (i = 0; i < pt_entries(spt); \
+ i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
if (!ppgtt_get_shadow_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
-static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
+#define for_each_shadow_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); \
+ i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
+ if (!ppgtt_get_shadow_entry(spt, e, i))
+
+static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
int v = atomic_read(&spt->refcount);
trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
-
atomic_inc(&spt->refcount);
}
+static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+ return atomic_dec_return(&spt->refcount);
+}
+
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
@@ -843,7 +961,8 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
pfn = ops->get_pfn(entry);
type = spt->shadow_page.type;
- if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+ /* Uninitialized spte or unshadowed spte. */
+ if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
return;
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
@@ -855,14 +974,11 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
struct intel_gvt_gtt_entry e;
unsigned long index;
int ret;
- int v = atomic_read(&spt->refcount);
trace_spt_change(spt->vgpu->id, "die", spt,
spt->guest_page.gfn, spt->shadow_page.type);
- trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
-
- if (atomic_dec_return(&spt->refcount) > 0)
+ if (ppgtt_put_spt(spt) > 0)
return 0;
for_each_present_shadow_entry(spt, &e, index) {
@@ -871,9 +987,15 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
gvt_vdbg_mm("invalidate 4K entry\n");
ppgtt_invalidate_pte(spt, &e);
break;
+ case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+ /* We don't set up 64K shadow entries so far. */
+ WARN(1, "suspicious 64K gtt entry\n");
+ continue;
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ gvt_vdbg_mm("invalidate 2M entry\n");
+ continue;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
- WARN(1, "GVT doesn't support 2M/1GB page\n");
+ WARN(1, "GVT doesn't support 1GB page\n");
continue;
case GTT_TYPE_PPGTT_PML4_ENTRY:
case GTT_TYPE_PPGTT_PDP_ENTRY:
@@ -899,6 +1021,22 @@ fail:
return ret;
}
+static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+ u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
+ GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+ return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ /* 64K paging only controlled by IPS bit in PTE now. */
+ return true;
+ } else
+ return false;
+}
+
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
@@ -906,35 +1044,54 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *spt = NULL;
+ bool ips = false;
int ret;
GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
+ if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+ ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
+
spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
- if (spt)
+ if (spt) {
ppgtt_get_spt(spt);
- else {
+
+ if (ips != spt->guest_page.pde_ips) {
+ spt->guest_page.pde_ips = ips;
+
+ gvt_dbg_mm("reshadow PDE since ips changed\n");
+ clear_page(spt->shadow_page.vaddr);
+ ret = ppgtt_populate_spt(spt);
+ if (ret) {
+ ppgtt_put_spt(spt);
+ goto err;
+ }
+ }
+ } else {
int type = get_next_pt_type(we->type);
- spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+ spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
- goto fail;
+ goto err;
}
ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
if (ret)
- goto fail;
+ goto err_free_spt;
ret = ppgtt_populate_spt(spt);
if (ret)
- goto fail;
+ goto err_free_spt;
trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
spt->shadow_page.type);
}
return spt;
-fail:
+
+err_free_spt:
+ ppgtt_free_spt(spt);
+err:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
return ERR_PTR(ret);
@@ -948,16 +1105,118 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
se->type = ge->type;
se->val64 = ge->val64;
+ /* We always split 64KB pages, so clear IPS in the shadow PDE. */
+ if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+ ops->clear_ips(se);
+
ops->set_pfn(se, s->shadow_page.mfn);
}
+/**
+ * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
+ * are not met, negative on error.
+ */
+static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *entry)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long pfn;
+
+ if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+ return 0;
+
+ pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
+ if (pfn == INTEL_GVT_INVALID_ADDR)
+ return -EINVAL;
+
+ return PageTransHuge(pfn_to_page(pfn));
+}
+
+static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *se)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *sub_spt;
+ struct intel_gvt_gtt_entry sub_se;
+ unsigned long start_gfn;
+ dma_addr_t dma_addr;
+ unsigned long sub_index;
+ int ret;
+
+ gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
+
+ start_gfn = ops->get_pfn(se);
+
+ sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
+ if (IS_ERR(sub_spt))
+ return PTR_ERR(sub_spt);
+
+ for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+ start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+ if (ret) {
+ ppgtt_invalidate_spt(spt);
+ return ret;
+ }
+ sub_se.val64 = se->val64;
+
+ /* Copy the PAT field from PDE. */
+ sub_se.val64 &= ~_PAGE_PAT;
+ sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
+
+ ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
+ }
+
+ /* Clear dirty field. */
+ se->val64 &= ~_PAGE_DIRTY;
+
+ ops->clear_pse(se);
+ ops->clear_ips(se);
+ ops->set_pfn(se, sub_spt->shadow_page.mfn);
+ ppgtt_set_shadow_entry(spt, se, index);
+ return 0;
+}
+
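
For reference, the `(se->val64 & _PAGE_PAT_LARGE) >> 5` above is just relocating one bit: in a huge-page PDE the PAT bit lives at bit 12 (_PAGE_PAT_LARGE), while a 4K PTE carries it at bit 7 (_PAGE_PAT), and 12 - 7 = 5. A minimal user-space sketch of that move, restating the x86 bit positions for illustration only:

#include <stdint.h>
#include <stdio.h>

#define PAGE_PAT	(1ULL << 7)	/* PAT bit position in a 4K PTE */
#define PAGE_PAT_LARGE	(1ULL << 12)	/* PAT bit position in a 2M PDE */

static uint64_t pde_pat_to_pte(uint64_t pde_val, uint64_t pte_val)
{
	pte_val &= ~PAGE_PAT;				/* clear the PTE PAT field */
	pte_val |= (pde_val & PAGE_PAT_LARGE) >> 5;	/* bit 12 -> bit 7 */
	return pte_val;
}

int main(void)
{
	uint64_t pde_val = PAGE_PAT_LARGE;	/* PAT set in the guest PDE */

	printf("pte = %#llx\n",
	       (unsigned long long)pde_pat_to_pte(pde_val, 0));
	return 0;
}
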
+static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *se)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry entry = *se;
+ unsigned long start_gfn;
+ dma_addr_t dma_addr;
+ int i, ret;
+
+ gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
+
+ GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
+
+ start_gfn = ops->get_pfn(se);
+
+ entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
+ ops->set_64k_splited(&entry);
+
+ for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+ start_gfn + i, PAGE_SIZE, &dma_addr);
+ if (ret)
+ return ret;
+
+ ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(spt, &entry, index + i);
+ }
+ return 0;
+}
+
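
As the GEM_BUG_ON above enforces, a guest 64K entry can only sit at a stride-aligned PTE slot, and the split fans it out into consecutive 4K shadow entries. A small sketch of the indexing, assuming GTT_64K_PTE_STRIDE is 16 (64K / 4K):

#include <stdio.h>

#define GTT_64K_PTE_STRIDE 16	/* 64K / 4K: one 64K entry per 16 PTE slots */

int main(void)
{
	unsigned long index = 32;	/* a stride-aligned guest 64K PTE slot */

	if (index % GTT_64K_PTE_STRIDE) {
		puts("unused slot: writes here are ignored");
		return 0;
	}
	/* the split fills 16 consecutive 4K shadow entries */
	for (int i = 0; i < GTT_64K_PTE_STRIDE; i++)
		printf("shadow PTE #%lu maps start_gfn + %d\n", index + i, i);
	return 0;
}
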
static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
struct intel_gvt_gtt_entry *ge)
{
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry se = *ge;
- unsigned long gfn;
+ unsigned long gfn, page_size = PAGE_SIZE;
dma_addr_t dma_addr;
int ret;
@@ -970,16 +1229,33 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
gvt_vdbg_mm("shadow 4K gtt entry\n");
break;
+ case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+ gvt_vdbg_mm("shadow 64K gtt entry\n");
+ /*
+ * The layout of a 64K page is special: the page size is
+ * controlled by the upper PDE. To keep things simple, we
+ * always split a 64K page into 4K pages in the shadow PT.
+ */
+ return split_64KB_gtt_entry(vgpu, spt, index, &se);
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ gvt_vdbg_mm("shadow 2M gtt entry\n");
+ ret = is_2MB_gtt_possible(vgpu, ge);
+ if (ret == 0)
+ return split_2MB_gtt_entry(vgpu, spt, index, &se);
+ else if (ret < 0)
+ return ret;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+ break;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
- gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+ gvt_vgpu_err("GVT doesn't support 1GB entry\n");
return -EINVAL;
default:
GEM_BUG_ON(1);
};
/* direct shadow */
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
+ &dma_addr);
if (ret)
return -ENXIO;
@@ -1062,8 +1338,12 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
ret = ppgtt_invalidate_spt(s);
if (ret)
goto fail;
- } else
+ } else {
+ /* We don't set up 64K shadow entries so far. */
+ WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+ "suspicious 64K entry\n");
ppgtt_invalidate_pte(spt, se);
+ }
return 0;
fail:
@@ -1286,7 +1566,7 @@ static int ppgtt_handle_guest_write_page_table(
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry old_se;
int new_present;
- int ret;
+ int i, ret;
new_present = ops->test_present(we);
@@ -1308,8 +1588,27 @@ static int ppgtt_handle_guest_write_page_table(
goto fail;
if (!new_present) {
- ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
- ppgtt_set_shadow_entry(spt, &old_se, index);
+ /* For split 64KB entries, we need to clear them all. */
+ if (ops->test_64k_splited(&old_se) &&
+ !(index % GTT_64K_PTE_STRIDE)) {
+ gvt_vdbg_mm("remove splited 64K shadow entries\n");
+ for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+ ops->clear_64k_splited(&old_se);
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index + i);
+ }
+ } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
+ old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+ ops->clear_pse(&old_se);
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
+ } else {
+ ops->set_pfn(&old_se,
+ vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
+ }
}
return 0;
@@ -1391,7 +1690,17 @@ static int ppgtt_handle_guest_write_page_table_bytes(
ppgtt_get_guest_entry(spt, &we, index);
- ops->test_pse(&we);
+ /*
+ * For a page table that holds 64K gtt entries, only PTE#0,
+ * PTE#16, PTE#32, ..., PTE#496 are used. Writes to the unused
+ * PTEs should be ignored.
+ */
+ if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
+ (index % GTT_64K_PTE_STRIDE)) {
+ gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
+ index);
+ return 0;
+ }
if (bytes == info->gtt_entry_size) {
ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
@@ -1939,7 +2248,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
}
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
- &dma_addr);
+ PAGE_SIZE, &dma_addr);
if (ret) {
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
@@ -2031,7 +2340,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
* GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
* is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
*/
- if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+ if (type > GTT_TYPE_PPGTT_PTE_PT) {
struct intel_gvt_gtt_entry se;
memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
@@ -2315,13 +2624,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
gvt_dbg_core("init gtt\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
- gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
- } else {
- return -ENODEV;
- }
+ gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+ gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
page = (void *)get_zeroed_page(GFP_KERNEL);
if (!page) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 97e62647418a..7a9b36176efb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -63,6 +63,12 @@ struct intel_gvt_gtt_pte_ops {
void (*clear_present)(struct intel_gvt_gtt_entry *e);
void (*set_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+ void (*clear_pse)(struct intel_gvt_gtt_entry *e);
+ bool (*test_ips)(struct intel_gvt_gtt_entry *e);
+ void (*clear_ips)(struct intel_gvt_gtt_entry *e);
+ bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
+ void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
+ void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};
@@ -95,6 +101,7 @@ typedef enum {
GTT_TYPE_GGTT_PTE,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_64K_ENTRY,
GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PTE_1G_ENTRY,
@@ -222,6 +229,7 @@ struct intel_vgpu_ppgtt_spt {
struct {
intel_gvt_gtt_type_t type;
+ bool pde_ips; /* for 64KB PTEs */
void *vaddr;
struct page *page;
unsigned long mfn;
@@ -229,6 +237,7 @@ struct intel_vgpu_ppgtt_spt {
struct {
intel_gvt_gtt_type_t type;
+ bool pde_ips; /* for 64KB PTEs */
unsigned long gfn;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 61bd14fcb649..712f9d14e720 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -238,18 +238,15 @@ static void init_device_info(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- info->max_support_vgpus = 8;
- info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
- info->mmio_size = 2 * 1024 * 1024;
- info->mmio_bar = 0;
- info->gtt_start_offset = 8 * 1024 * 1024;
- info->gtt_entry_size = 8;
- info->gtt_entry_size_shift = 3;
- info->gmadr_bytes_in_cmd = 8;
- info->max_surface_size = 36 * 1024 * 1024;
- }
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
info->msi_cap_offset = pdev->msi_cap;
}
@@ -271,11 +268,8 @@ static int gvt_service_thread(void *data)
continue;
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
- (void *)&gvt->service_request)) {
- mutex_lock(&gvt->lock);
+ (void *)&gvt->service_request))
intel_gvt_emulate_vblank(gvt);
- mutex_unlock(&gvt->lock);
- }
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
@@ -379,6 +373,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
idr_init(&gvt->vgpu_idr);
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
+ mutex_init(&gvt->sched_lock);
gvt->dev_priv = dev_priv;
init_device_info(gvt);
@@ -473,3 +468,7 @@ out_clean_idr:
kfree(gvt);
return ret;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+MODULE_SOFTDEP("pre: kvmgt");
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 858967daf04b..9a9671522774 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -170,12 +170,18 @@ struct intel_vgpu_submission {
struct intel_vgpu {
struct intel_gvt *gvt;
+ struct mutex vgpu_lock;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
+
+ /* Both sched_data and sched_ctl can be seen as part of the global gvt
+ * scheduler structure, so the two vGPU fields below are protected
+ * by sched_lock rather than vgpu_lock.
+ */
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -296,7 +302,13 @@ struct intel_vgpu_type {
};
struct intel_gvt {
+ /* GVT-scope lock, protecting GVT itself and all resources not yet
+ * covered by more specific locks (the vgpu and scheduler locks).
+ */
struct mutex lock;
+ /* scheduler-scope lock, protecting gvt and vgpu scheduling-related data */
+ struct mutex sched_lock;
+
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
@@ -316,6 +328,10 @@ struct intel_gvt {
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
+
+ /* service_request is always accessed with bit operations; using
+ * atomic bit ops throughout means the big gvt lock is not needed.
+ */
unsigned long service_request;
struct {
@@ -363,9 +379,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
#define gvt_ggtt_sz(gvt) \
- ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+ ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8f1caacdc78a..7a58ca555197 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_SKL;
else if (IS_KABYLAKE(gvt->dev_priv))
return D_KBL;
+ else if (IS_BROXTON(gvt->dev_priv))
+ return D_BXT;
return 0;
}
@@ -208,6 +210,31 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
return 0;
}
+static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+ if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+ if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
+ gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
+ else if (!ips)
+ gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
+ else {
+ /* All engines must be enabled together for a vGPU,
+ * since we don't know which engine the ppgtt will
+ * bind to when shadowing.
+ */
+ gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
+ ips);
+ return -EINVAL;
+ }
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
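
The handler above takes an all-or-nothing view of the IPS field before gen11: either every engine's bit is set or none is. A user-space sketch of that validation, with 0xF standing in for GAMW_ECO_ENABLE_64K_IPS_FIELD (an assumed value, for illustration only):

#include <stdio.h>
#include <stdint.h>

#define IPS_FIELD 0xFu	/* stand-in for GAMW_ECO_ENABLE_64K_IPS_FIELD */

static int ips_write_valid(uint32_t val)
{
	uint32_t ips = val & IPS_FIELD;

	/* all engines enabled, or all disabled; partial settings rejected */
	return ips == IPS_FIELD || ips == 0;
}

int main(void)
{
	printf("all on: %d, all off: %d, partial: %d\n",
	       ips_write_valid(0xF), ips_write_valid(0x0),
	       ips_write_valid(0x3));
	return 0;
}
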
static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
@@ -255,7 +282,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_BROXTON(vgpu->gvt->dev_priv)) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -316,6 +344,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
}
+ /* vgpu_lock is already held by the MMIO r/w emulation path */
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
/* sw will wait for the device to ack the reset request */
@@ -420,7 +449,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
else
vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+ /* vgpu_lock is already held by the MMIO r/w emulation path */
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_check_vblank_emulation(vgpu->gvt);
+ mutex_lock(&vgpu->vgpu_lock);
return 0;
}
@@ -857,7 +889,8 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
data = vgpu_vreg(vgpu, offset);
if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv))
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_BROXTON(vgpu->gvt->dev_priv))
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
@@ -1209,8 +1242,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
ret = handle_g2v_notification(vgpu, data);
break;
/* add xhot and yhot to handled list to avoid error log */
- case 0x78830:
- case 0x78834:
+ case _vgtif_reg(cursor_x_hot):
+ case _vgtif_reg(cursor_y_hot):
case _vgtif_reg(pdp[0].lo):
case _vgtif_reg(pdp[0].hi):
case _vgtif_reg(pdp[1].lo):
@@ -1369,6 +1402,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
+ } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ /*
+ * "Read memory latency" command on gen9.
+ * The memory latency values below are read
+ * from the Broxton MRB.
+ */
+ if (!*data0)
+ *data0 = 0x16080707;
+ else
+ *data0 = 0x16161616;
}
break;
case SKL_PCODE_CDCLK_CONTROL:
@@ -1426,8 +1469,11 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
{
u32 v = *(u32 *)p_data;
- v &= (1 << 31) | (1 << 29) | (1 << 9) |
- (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+ if (IS_BROXTON(vgpu->gvt->dev_priv))
+ v &= (1 << 31) | (1 << 29);
+ else
+ v &= (1 << 31) | (1 << 29) | (1 << 9) |
+ (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
v |= (v >> 1);
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
@@ -1447,6 +1493,109 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
+static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & BXT_DE_PLL_PLL_ENABLE)
+ v |= BXT_DE_PLL_LOCK;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
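
This handler and the port-PLL one below share a single emulation idiom: when the guest sets an enable bit, immediately reflect the matching lock bit back into the vreg so the guest's polling loop completes. A compact sketch of the pattern (the bit positions are placeholders, not the real BXT register definitions):

#include <stdio.h>
#include <stdint.h>

#define PLL_ENABLE	(1u << 31)	/* placeholder enable bit */
#define PLL_LOCK	(1u << 30)	/* placeholder lock/ack bit */

static uint32_t emulate_pll_write(uint32_t v)
{
	/* the guest enabled the PLL; report it as locked right away */
	if (v & PLL_ENABLE)
		v |= PLL_LOCK;
	return v;
}

int main(void)
{
	printf("vreg = %#x\n", (unsigned)emulate_pll_write(PLL_ENABLE));
	return 0;
}
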
+static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & PORT_PLL_ENABLE)
+ v |= PORT_PLL_LOCK;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+ u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
+
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = vgpu_vreg(vgpu, offset);
+
+ v &= ~UNIQUE_TRANGE_EN_METHOD;
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
+ vgpu_vreg(vgpu, offset - 0x600) = v;
+ vgpu_vreg(vgpu, offset - 0x800) = v;
+ } else {
+ vgpu_vreg(vgpu, offset - 0x400) = v;
+ vgpu_vreg(vgpu, offset - 0x600) = v;
+ }
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ if (v & BIT(0)) {
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+ ~PHY_RESERVED;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
+ PHY_POWER_GOOD;
+ }
+
+ if (v & BIT(1)) {
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+ ~PHY_RESERVED;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
+ PHY_POWER_GOOD;
+ }
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ vgpu_vreg(vgpu, offset) = 0;
+ return 0;
+}
+
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -1657,7 +1806,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
+ gamw_echo_dev_rw_ia_write);
+
MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
@@ -2670,17 +2821,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
- MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
+ MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
+ MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2805,53 +2956,57 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
- MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
- MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ NULL, NULL);
+ MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
/* TRTT */
- MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+ NULL, gen9_trtte_write);
+ MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
- MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
- MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
- MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
@@ -2869,11 +3024,188 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_D(_MMIO(0x4ab8), D_KBL);
- MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
+ MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+
+ return 0;
+}
+
+static int init_bxt_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
+
+ MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
+ MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
+ MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
+ MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
+ MMIO_D(ERROR_GEN6, D_BXT);
+ MMIO_D(DONE_REG, D_BXT);
+ MMIO_D(EIR, D_BXT);
+ MMIO_D(PGTBL_ER, D_BXT);
+ MMIO_D(_MMIO(0x4194), D_BXT);
+ MMIO_D(_MMIO(0x4294), D_BXT);
+ MMIO_D(_MMIO(0x4494), D_BXT);
+
+ MMIO_RING_D(RING_PSMI_CTL, D_BXT);
+ MMIO_RING_D(RING_DMA_FADD, D_BXT);
+ MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
+ MMIO_RING_D(RING_IPEHR, D_BXT);
+ MMIO_RING_D(RING_INSTPS, D_BXT);
+ MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
+ MMIO_RING_D(RING_BBSTATE, D_BXT);
+ MMIO_RING_D(RING_IPEIR, D_BXT);
+
+ MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
+
+ MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
+ MMIO_D(BXT_RP_STATE_CAP, D_BXT);
+ MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
+ NULL, bxt_phy_ctl_family_write);
+ MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
+ NULL, bxt_phy_ctl_family_write);
+ MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
+ MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
+ MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
+ NULL, bxt_port_pll_enable_write);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
+ NULL, bxt_port_pll_enable_write);
+ MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
+ bxt_port_pll_enable_write);
+
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
+
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
+
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
+ NULL, bxt_pcs_dw12_grp_write);
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+ bxt_port_tx_dw3_read, NULL);
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
+
+ MMIO_D(BXT_DE_PLL_CTL, D_BXT);
+ MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
+ MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
+ MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
+
+ MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
+
+ MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+ MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+
+ MMIO_D(RC6_CTX_BASE, D_BXT);
+
+ MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
+ MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
+ MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
+ MMIO_D(GEN6_GFXPAUSE, D_BXT);
+ MMIO_D(GEN8_L3SQCREG1, D_BXT);
+
+ MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2965,6 +3297,16 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
+ } else if (IS_BROXTON(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_skl_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_bxt_mmio_info(gvt);
+ if (ret)
+ goto err;
}
gvt->mmio.mmio_block = mmio_blocks;
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index f6dd9f717888..5af11cf1b482 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -53,7 +53,7 @@ struct intel_gvt_mpt {
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
- dma_addr_t *dma_addr);
+ unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 7a041b368f68..5daa23ae566b 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -350,7 +350,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
clear_bits |= (1 << bit);
}
- WARN_ON(!up_irq_info);
+ if (WARN_ON(!up_irq_info))
+ return;
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
@@ -580,7 +581,9 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+ } else if (IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)
+ || IS_BROXTON(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,14 +693,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
gvt_dbg_core("init irq framework\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)) {
- irq->ops = &gen8_irq_ops;
- irq->irq_map = gen8_irq_map;
- } else {
- WARN_ON(1);
- return -ENODEV;
- }
+ irq->ops = &gen8_irq_ops;
+ irq->irq_map = gen8_irq_map;
/* common event initialization */
init_events(irq);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index df4e4a07db3d..4d2f53ae9f0f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -94,6 +94,7 @@ struct gvt_dma {
struct rb_node dma_addr_node;
gfn_t gfn;
dma_addr_t dma_addr;
+ unsigned long size;
struct kref ref;
};
@@ -106,51 +107,103 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
-static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
- dma_addr_t *dma_addr)
+static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- struct page *page;
- unsigned long pfn;
+ int total_pages;
+ int npage;
int ret;
- /* Pin the page first. */
- ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
- if (ret != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
- gfn, ret);
- return -EINVAL;
+ total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+
+ for (npage = 0; npage < total_pages; npage++) {
+ unsigned long cur_gfn = gfn + npage;
+
+ ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
+ WARN_ON(ret != 1);
}
+}
- if (!pfn_valid(pfn)) {
- gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
- vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- return -EINVAL;
+/* Pin a normal or compound guest page for dma. */
+static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size, struct page **page)
+{
+ unsigned long base_pfn = 0;
+ int total_pages;
+ int npage;
+ int ret;
+
+ total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+ /*
+ * We pin the pages one by one to avoid allocating a big array
+ * on the stack to hold the pfns.
+ */
+ for (npage = 0; npage < total_pages; npage++) {
+ unsigned long cur_gfn = gfn + npage;
+ unsigned long pfn;
+
+ ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (ret != 1) {
+ gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
+ cur_gfn, ret);
+ goto err;
+ }
+
+ if (!pfn_valid(pfn)) {
+ gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
+ npage++;
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (npage == 0)
+ base_pfn = pfn;
+ else if (base_pfn + npage != pfn) {
+ gvt_vgpu_err("The pages are not continuous\n");
+ ret = -EINVAL;
+ npage++;
+ goto err;
+ }
}
+ *page = pfn_to_page(base_pfn);
+ return 0;
+err:
+ gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+ return ret;
+}
+
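
gvt_pin_guest_page above only accepts a large mapping when the pfns, pinned one by one, form a single contiguous run; anything else is unwound and rejected. A self-contained sketch of that check, with a fake pin_one() standing in for vfio_pin_pages():

#include <stdio.h>

#define PAGE_SZ 4096UL

/* hypothetical single-page pin; stands in for vfio_pin_pages() */
static int pin_one(unsigned long gfn, unsigned long *pfn)
{
	*pfn = 0x1000UL + gfn;	/* fake contiguous gfn->pfn mapping */
	return 0;
}

static int pin_contiguous(unsigned long gfn, unsigned long size,
			  unsigned long *base_pfn)
{
	unsigned long total = (size + PAGE_SZ - 1) / PAGE_SZ;
	unsigned long pfn;

	for (unsigned long n = 0; n < total; n++) {
		if (pin_one(gfn + n, &pfn))
			return -1;	/* real code unpins what it pinned */
		if (n == 0)
			*base_pfn = pfn;
		else if (*base_pfn + n != pfn)
			return -1;	/* backing pages not contiguous */
	}
	return 0;
}

int main(void)
{
	unsigned long base;

	if (!pin_contiguous(0x100, 2UL << 20, &base))	/* pin a 2M range */
		printf("pinned, base pfn %#lx\n", base);
	return 0;
}
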
+static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t *dma_addr, unsigned long size)
+{
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct page *page = NULL;
+ int ret;
+
+ ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
+ if (ret)
+ return ret;
+
/* Setup DMA mapping. */
- page = pfn_to_page(pfn);
- *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, *dma_addr)) {
- gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
- vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- return -ENOMEM;
+ *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(dev, *dma_addr);
+ if (ret) {
+ gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
+ page_to_pfn(page), ret);
+ gvt_unpin_guest_page(vgpu, gfn, size);
}
- return 0;
+ return ret;
}
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, unsigned long size)
{
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- int ret;
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
- WARN_ON(ret != 1);
+ dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ gvt_unpin_guest_page(vgpu, gfn, size);
}
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
@@ -191,7 +244,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
}
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, unsigned long size)
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
@@ -203,6 +256,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
new->vgpu = vgpu;
new->gfn = gfn;
new->dma_addr = dma_addr;
+ new->size = size;
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
@@ -260,7 +314,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
- gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
+ gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
mutex_unlock(&vgpu->vdev.cache_lock);
}
@@ -515,7 +569,8 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
if (!entry)
continue;
- gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
mutex_unlock(&vgpu->vdev.cache_lock);
@@ -1648,7 +1703,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
}
int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
- dma_addr_t *dma_addr)
+ unsigned long size, dma_addr_t *dma_addr)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
@@ -1665,11 +1720,11 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
entry = __gvt_cache_find_gfn(info->vgpu, gfn);
if (!entry) {
- ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else {
@@ -1681,7 +1736,7 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
return 0;
err_unmap:
- gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+ gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
mutex_unlock(&info->vgpu->vdev.cache_lock);
return ret;
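
Stepping back, kvmgt_dma_map_guest_page keeps a gfn-to-dma_addr cache: a miss maps the page and inserts an entry, a hit just takes another reference. A toy sketch of that flow, using a flat table where the driver uses rb-trees:

#include <stdio.h>
#include <stdint.h>

struct entry {
	unsigned long gfn;
	uint64_t dma;
	int ref;
	int used;
};

static struct entry cache[64];	/* toy stand-in for the rb-trees */

static uint64_t map_page(unsigned long gfn)
{
	return 0xdead0000ULL + gfn;	/* fake DMA mapping */
}

static uint64_t get_dma(unsigned long gfn)
{
	for (int i = 0; i < 64; i++)
		if (cache[i].used && cache[i].gfn == gfn) {
			cache[i].ref++;	/* hit: just take a reference */
			return cache[i].dma;
		}
	for (int i = 0; i < 64; i++)
		if (!cache[i].used) {	/* miss: map and insert */
			cache[i] = (struct entry){ gfn, map_page(gfn), 1, 1 };
			return cache[i].dma;
		}
	return 0;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)get_dma(42));
	printf("%#llx\n", (unsigned long long)get_dma(42));	/* cached */
	return 0;
}
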
@@ -1691,7 +1746,8 @@ static void __gvt_dma_release(struct kref *ref)
{
struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
- gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+ gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
__gvt_cache_remove_entry(entry->vgpu, entry);
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index b31eb36fc102..994366035364 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
return;
gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (reg_is_mmio(gvt, offset)) {
if (read)
@@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
memcpy(pt, p_data, bytes);
}
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
/**
@@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -156,7 +156,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
offset, bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
return 0;
}
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -220,7 +220,7 @@ err:
gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
bytes);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index dac8c6401e26..1ffc69eba30e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -42,15 +42,16 @@ struct intel_vgpu;
#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
+#define D_BXT (1 << 3)
-#define D_GEN9PLUS (D_SKL | D_KBL)
-#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
+#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
-#define D_SKL_PLUS (D_SKL | D_KBL)
-#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
+#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
#define D_PRE_SKL (D_BDW)
-#define D_ALL (D_BDW | D_SKL | D_KBL)
+#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT)
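
Because the D_* values are independent bits, folding D_BXT into each *_PLUS mask is all it takes for existing MMIO table entries to match Broxton. A tiny sketch of how such a mask gates an entry, reusing the values defined above:

#include <stdio.h>

#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
#define D_BXT (1 << 3)
#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)

int main(void)
{
	int device = D_BXT;	/* the running platform's bit */

	/* an entry registered with D_SKL_PLUS now covers Broxton too */
	printf("entry applies: %s\n",
	       (D_SKL_PLUS & device) ? "yes" : "no");
	return 0;
}
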
typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 5ca9caf7552a..42e1e6bdcc2c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -364,7 +364,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@@ -401,7 +402,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -446,9 +447,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
#define CTX_CONTEXT_CONTROL_VAL 0x03
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+bool is_inhibit_context(struct intel_context *ce)
{
- u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
+ const u32 *reg_state = ce->lrc_reg_state;
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
@@ -467,7 +468,9 @@ static void switch_mmio(struct intel_vgpu *pre,
u32 old_v, new_v;
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)
+ || IS_BROXTON(dev_priv))
switch_mocs(pre, next, ring_id);
for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -479,7 +482,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* state image on kabylake, it's initialized by lri command and
* save or restore with context together.
*/
- if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ && mmio->in_context)
continue;
// save
@@ -501,7 +505,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(s->shadow_ctx, ring_id))
+ !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
continue;
if (mmio->mask)
@@ -574,7 +578,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ if (IS_SKYLAKE(gvt->dev_priv) ||
+ IS_KABYLAKE(gvt->dev_priv) ||
+ IS_BROXTON(gvt->dev_priv))
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
else
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 0439eb8057a8..5c3b9ff9f96a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 32ffcd566cdd..67f19992b226 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -230,17 +230,18 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
/**
* intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
* @vgpu: a vGPU
- * @gpfn: guest pfn
+ * @gfn: guest pfn
+ * @size: page size
* @dma_addr: retrieve allocated dma addr
*
* Returns:
* 0 on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_dma_map_guest_page(
- struct intel_vgpu *vgpu, unsigned long gfn,
+ struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
dma_addr_t *dma_addr)
{
- return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+ return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
dma_addr);
}
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 53e2bd79c97d..256d0db8bbb1 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes)
{
- struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_page_track *page_track;
int ret = 0;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
if (!page_track) {
@@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
}
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index d053cbe1dc94..09d7bb72b4ff 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
ktime_t cur_time;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
cur_time = ktime_get();
if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
tbs_sched_func(sched_data);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
}
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
+ int ret;
+
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops = &tbs_schedule_ops;
+ ret = gvt->scheduler.sched_ops->init(gvt);
+ mutex_unlock(&gvt->sched_lock);
- return gvt->scheduler.sched_ops->init(gvt);
+ return ret;
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
gvt->scheduler.sched_ops->clean(gvt);
+ mutex_unlock(&gvt->sched_lock);
}
+/* For the per-vgpu scheduler policy there are two per-vgpu fields:
+ * sched_data and sched_ctl. We treat them as part of the global
+ * scheduler, so they are protected by gvt->sched_lock. Callers must
+ * decide for themselves whether vgpu_lock should be held outside.
+ */
+
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
- return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ int ret;
+
+ mutex_lock(&vgpu->gvt->sched_lock);
+ ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
+
+ return ret;
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->gvt->sched_lock);
vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ mutex_lock(&vgpu->gvt->sched_lock);
if (!vgpu_data->active) {
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
+ mutex_lock(&gvt->sched_lock);
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+ mutex_unlock(&gvt->sched_lock);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+ mutex_lock(&vgpu->gvt->sched_lock);
scheduler->sched_ops->stop_schedule(vgpu);
if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
+ mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c2d183b91500..b0e566956b8d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -45,20 +45,16 @@ static void set_context_pdp_root_pointer(
struct execlist_ring_context *ring_context,
u32 pdp[8])
{
- struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
int i;
for (i = 0; i < 8; i++)
- pdp_pair[i].val = pdp[7 - i];
+ ring_context->pdps[i].val = pdp[7 - i];
}
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -128,9 +124,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ workload->req->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -205,7 +200,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static inline bool is_gvt_request(struct i915_request *req)
{
- return i915_gem_context_force_single_submission(req->ctx);
+ return i915_gem_context_force_single_submission(req->gem_context);
}
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -280,10 +275,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
u64 desc = 0;
desc = ce->lrc_desc;
@@ -292,7 +285,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
* like GEN8_CTX_* cached in desc_template
*/
desc &= U64_MAX << 12;
- desc |= ctx->desc_template & ((1ULL << 12) - 1);
+ desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
ce->lrc_desc = desc;
}
@@ -300,12 +293,12 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_request *req = workload->req;
void *shadow_ring_buffer_va;
u32 *cs;
- struct i915_request *req = workload->req;
- if (IS_KABYLAKE(req->i915) &&
- is_inhibit_context(req->ctx, req->engine->id))
+ if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+ && is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/* allocate shadow ring buffer */
@@ -353,92 +346,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_ring *ring;
+ struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+ struct intel_context *ce;
+ struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (workload->shadowed)
+ if (workload->req)
return 0;
+ /* Pin the shadow context in gvt even though it will be pinned again
+ * when i915 allocates the request. This is because gvt updates the
+ * guest context from the shadow context when the workload completes,
+ * and by then i915 may already have unpinned the shadow context,
+ * making the shadow_ctx pages invalid. So gvt needs its own pin; after
+ * updating the guest context, gvt can unpin shadow_ctx safely.
+ */
+ ce = intel_context_pin(shadow_ctx, engine);
+ if (IS_ERR(ce)) {
+ gvt_vgpu_err("fail to pin shadow context\n");
+ return PTR_ERR(ce);
+ }
+
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(shadow_ctx,
- dev_priv->engine[ring_id]);
+ if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(ce);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto err_scan;
+ goto err_unpin;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto err_scan;
+ goto err_shadow;
}
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = intel_context_pin(shadow_ctx, engine);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
+ rq = i915_request_alloc(engine, shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
goto err_shadow;
}
+ workload->req = i915_request_get(rq);
ret = populate_shadow_context(workload);
if (ret)
- goto err_unpin;
- workload->shadowed = true;
- return 0;
+ goto err_req;
-err_unpin:
- intel_context_unpin(shadow_ctx, engine);
+ return 0;
+err_req:
+ rq = fetch_and_zero(&workload->req);
+ i915_request_put(rq);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
- return ret;
-}
-
-static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
-{
- int ring_id = workload->ring_id;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct i915_request *rq;
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ret;
-
- rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto err_unpin;
- }
-
- gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
- workload->req = i915_request_get(rq);
- ret = copy_workload_to_ring_buffer(workload);
- if (ret)
- goto err_unpin;
- return 0;
-
err_unpin:
- intel_context_unpin(shadow_ctx, engine);
- release_shadow_wa_ctx(&workload->wa_ctx);
+ intel_context_unpin(ce);
return ret;
}
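
The reworked error path unwinds in reverse acquisition order: the request reference is dropped first, then the shadow wa_ctx, then the context pin. A self-contained sketch of that goto-unwind idiom, with stub acquire/release helpers standing in for the real calls:

#include <stdio.h>

/* Stub acquire/release pairs standing in for the context pin, the
 * shadow ring buffer and the request; "fail" forces an error so the
 * unwind path below actually runs.
 */
static int acquire(const char *what, int fail)
{
	printf("acquire %s%s\n", what, fail ? ": failed" : "");
	return fail ? -1 : 0;
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

static int setup(void)
{
	int ret;

	ret = acquire("context pin", 0);
	if (ret)
		return ret;
	ret = acquire("shadow ring", 0);
	if (ret)
		goto err_unpin;
	ret = acquire("request", 1);
	if (ret)
		goto err_shadow;
	return 0;

err_shadow:
	release("shadow ring");	/* reverse of acquisition order */
err_unpin:
	release("context pin");
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;
}
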
@@ -508,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
i915_gem_obj_finish_shmem_access(bb->obj);
bb->accessing = false;
- i915_vma_move_to_active(bb->vma, workload->req, 0);
+ ret = i915_vma_move_to_active(bb->vma,
+ workload->req,
+ 0);
+ if (ret)
+ goto err;
}
}
return 0;
@@ -517,21 +489,13 @@ err:
return ret;
}
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- struct intel_vgpu_workload *workload = container_of(wa_ctx,
- struct intel_vgpu_workload,
- wa_ctx);
- int ring_id = workload->ring_id;
- struct intel_vgpu_submission *s = &workload->vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
- struct execlist_ring_context *shadow_ring_context;
- struct page *page;
-
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ struct intel_vgpu_workload *workload =
+ container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+ struct i915_request *rq = workload->req;
+ struct execlist_ring_context *shadow_ring_context =
+ (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +503,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
- kunmap_atomic(shadow_ring_context);
- return 0;
}
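
After this change, update_wa_ctx_2_shadow_ctx() no longer kmaps the context page; it overlays execlist_ring_context on the request's already-mapped lrc_reg_state and read-modify-writes the address fields. A user-space sketch of that overlay-and-mask pattern, with a made-up layout and mask:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register-file layout overlaid on a mapped context page. */
struct ring_context {
	struct { uint64_t val; } bb_per_ctx_ptr;
	struct { uint64_t val; } rcs_indirect_ctx;
};

#define CTX_ADDR_MASK 0xfffff000ULL /* made-up address field */

int main(void)
{
	static union {
		unsigned char raw[4096];
		struct ring_context ctx;
	} page; /* zero-initialized, like a fresh mapping */
	struct ring_context *ctx = &page.ctx;

	/* Read-modify-write: keep the low flag bits, replace the address. */
	ctx->bb_per_ctx_ptr.val =
		(ctx->bb_per_ctx_ptr.val & ~CTX_ADDR_MASK) | 0x12345000ULL;
	printf("bb_per_ctx_ptr = %#llx\n",
	       (unsigned long long)ctx->bb_per_ctx_ptr.val);
	return 0;
}
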
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -633,7 +594,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
goto err_unpin_mm;
}
- ret = intel_gvt_generate_request(workload);
+ ret = copy_workload_to_ring_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to generate request\n");
goto err_unpin_mm;
@@ -670,16 +631,14 @@ err_unpin_mm:
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- int ret = 0;
+ int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
+ mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
@@ -687,10 +646,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
goto out;
ret = prepare_workload(workload);
- if (ret) {
- intel_context_unpin(shadow_ctx, engine);
- goto out;
- }
out:
if (ret)
@@ -704,6 +659,7 @@ out:
}
mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
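
dispatch_workload() now nests vgpu_lock outside struct_mutex, and every other path in this series follows the same order; a fixed nesting order is what keeps the two locks deadlock-free. A toy pthread sketch of the convention:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vgpu_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dev_lock  = PTHREAD_MUTEX_INITIALIZER;

/* Every path takes vgpu_lock first and dev_lock second, unlocking in
 * reverse order. A thread taking them in the opposite order could
 * deadlock against this one.
 */
static void dispatch(void)
{
	pthread_mutex_lock(&vgpu_lock);
	pthread_mutex_lock(&dev_lock);
	puts("dispatch under both locks");
	pthread_mutex_unlock(&dev_lock);
	pthread_mutex_unlock(&vgpu_lock);
}

int main(void)
{
	dispatch();
	return 0;
}
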
@@ -713,7 +669,7 @@ static struct intel_vgpu_workload *pick_next_workload(
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
- mutex_lock(&gvt->lock);
+ mutex_lock(&gvt->sched_lock);
/*
* no current vgpu / will be scheduled out / no workload
@@ -759,33 +715,29 @@ static struct intel_vgpu_workload *pick_next_workload(
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
return workload;
}
static void update_guest_context(struct intel_vgpu_workload *workload)
{
+ struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- int ring_id = workload->ring_id;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->__engine[ring_id].state->obj;
+ struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
- gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+ workload->ctx_desc.lrca);
+ context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
context_page_num = 19;
i = 2;
@@ -858,19 +810,17 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_request *rq = workload->req;
int event;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->sched_lock);
/* For a workload with a request, wait for the context
* switch to make sure the request has completed.
* For a workload without a request, complete it directly.
*/
- if (workload->req) {
- struct drm_i915_private *dev_priv =
- workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine =
- dev_priv->engine[workload->ring_id];
+ if (rq) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -886,8 +836,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- i915_request_put(fetch_and_zero(&workload->req));
-
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
update_guest_context(workload);
@@ -896,10 +844,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
- mutex_lock(&dev_priv->drm.struct_mutex);
+
/* unpin shadow ctx as the shadow_ctx update is done */
- intel_context_unpin(s->shadow_ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&rq->i915->drm.struct_mutex);
+ intel_context_unpin(rq->hw_context);
+ mutex_unlock(&rq->i915->drm.struct_mutex);
+
+ i915_request_put(fetch_and_zero(&workload->req));
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -939,7 +890,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
if (gvt->scheduler.need_reschedule)
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&gvt->sched_lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
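
The completion path now drops its request reference with i915_request_put(fetch_and_zero(&workload->req)), clearing the field in the same expression that yields the pointer. A simplified, single-threaded sketch of the fetch_and_zero() idiom (GNU C statement expression, as in the kernel):

#include <stddef.h>
#include <stdio.h>

/* Simplified take on the kernel's fetch_and_zero(): grab the old
 * pointer and clear the slot in one expression, so the field is never
 * left pointing at an object whose reference has been dropped.
 */
#define fetch_and_zero(ptr) ({		\
	typeof(*(ptr)) __v = *(ptr);	\
	*(ptr) = 0;			\
	__v;				\
})

static void put_request(int *req)
{
	if (req)
		printf("dropping reference on %d\n", *req);
}

int main(void)
{
	int request = 42, *slot = &request;

	put_request(fetch_and_zero(&slot));
	printf("slot is now %p\n", (void *)slot);
	return 0;
}
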
struct workload_thread_param {
@@ -957,7 +909,8 @@ static int workload_thread(void *priv)
struct intel_vgpu *vgpu = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv);
+ || IS_KABYLAKE(gvt->dev_priv)
+ || IS_BROXTON(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
@@ -991,9 +944,7 @@ static int workload_thread(void *priv)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
- mutex_lock(&gvt->lock);
ret = dispatch_workload(workload);
- mutex_unlock(&gvt->lock);
if (ret) {
vgpu = workload->vgpu;
@@ -1270,7 +1221,6 @@ alloc_workload(struct intel_vgpu *vgpu)
atomic_set(&workload->shadow_ctx_active, 0);
workload->status = -EINPROGRESS;
- workload->shadowed = false;
workload->vgpu = vgpu;
return workload;
@@ -1285,7 +1235,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 gpa;
int i;
- gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+ gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 6c644782193e..21eddab4a9cd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
- bool shadowed;
int status;
struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 572a18c2bfb5..f6fa916517c3 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -46,6 +46,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
@@ -58,6 +59,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
+ vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;
+
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
@@ -223,22 +227,20 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
- struct intel_gvt *gvt = vgpu->gvt;
-
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
vgpu->active = false;
if (atomic_read(&vgpu->submission.running_workload_num)) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_stop_schedule(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
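
Deactivation drops vgpu_lock around intel_gvt_wait_vgpu_idle(), since the completion path needs the same lock to drain the running workloads. A condensed sketch of the unlock-wait-relock pattern, with a trivial stand-in for the wait:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool busy; /* stand-in for running_workload_num != 0 */

static void wait_idle(void)
{
	puts("waiting for workloads to drain...");
	busy = false; /* pretend the completion path finished them */
}

/* Drop the lock across the blocking wait so the completion path, which
 * needs the same lock, can make progress; retake it afterwards.
 */
static void deactivate(void)
{
	pthread_mutex_lock(&lock);
	if (busy) {
		pthread_mutex_unlock(&lock);
		wait_idle();
		pthread_mutex_lock(&lock);
	}
	puts("stopped scheduling");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	busy = true;
	deactivate();
	return 0;
}
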
/**
@@ -252,14 +254,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
WARN(vgpu->active, "vGPU is still active!\n");
intel_gvt_debugfs_remove_vgpu(vgpu);
- idr_remove(&gvt->vgpu_idr, vgpu->id);
- if (idr_is_empty(&gvt->vgpu_idr))
- intel_gvt_clean_irq(gvt);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
@@ -269,10 +268,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
- vfree(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+ mutex_lock(&gvt->lock);
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+ if (idr_is_empty(&gvt->vgpu_idr))
+ intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);
mutex_unlock(&gvt->lock);
+
+ vfree(vgpu);
}
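
Destruction now finishes per-vGPU teardown under vgpu_lock, unpublishes the vGPU from the idr under gvt->lock, and only then vfrees it, so no lookup can hand out a pointer to freed memory. A small sketch of that unpublish-before-free ordering, using a plain array as the registry:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static void *registry[8]; /* stand-in for the gvt->vgpu_idr */

static void destroy(int id)
{
	void *obj;

	/* Per-object teardown would happen first, under the object's own
	 * lock; then unpublish under the registry lock; free only once no
	 * lookup can return the pointer any more.
	 */
	pthread_mutex_lock(&registry_lock);
	obj = registry[id];
	registry[id] = NULL;
	pthread_mutex_unlock(&registry_lock);

	free(obj);
	printf("slot %d destroyed\n", id);
}

int main(void)
{
	registry[3] = malloc(64);
	destroy(3);
	return 0;
}
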
#define IDLE_VGPU_IDR 0
@@ -298,6 +303,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->id = IDLE_VGPU_IDR;
vgpu->gvt = gvt;
+ mutex_init(&vgpu->vgpu_lock);
for (i = 0; i < I915_NUM_ENGINES; i++)
INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
@@ -324,7 +330,10 @@ out_free_vgpu:
*/
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->vgpu_lock);
intel_vgpu_clean_sched_policy(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+
vfree(vgpu);
}
@@ -342,8 +351,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (!vgpu)
return ERR_PTR(-ENOMEM);
- mutex_lock(&gvt->lock);
-
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
@@ -353,6 +360,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
+ mutex_init(&vgpu->vgpu_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
@@ -400,8 +408,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- mutex_unlock(&gvt->lock);
-
return vgpu;
out_clean_sched_policy:
@@ -424,7 +430,6 @@ out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
vfree(vgpu);
- mutex_unlock(&gvt->lock);
return ERR_PTR(ret);
}
@@ -456,12 +461,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+ mutex_lock(&gvt->lock);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (IS_ERR(vgpu))
- return vgpu;
-
- /* calculate left instance change for types */
- intel_gvt_update_vgpu_types(gvt);
+ if (!IS_ERR(vgpu))
+ /* calculate left instance change for types */
+ intel_gvt_update_vgpu_types(gvt);
+ mutex_unlock(&gvt->lock);
return vgpu;
}
@@ -473,7 +478,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* @engine_mask: engines to reset for GT reset
*
* This function is called when the user wants to reset a virtual GPU through
- * device model reset or GT reset. The caller should hold the gvt lock.
+ * device model reset or GT reset. The caller should hold the vgpu lock.
*
* vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
* the whole vGPU to default state as when it is created. This vGPU function
@@ -513,9 +518,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
* scheduler when the reset is triggered by current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
- mutex_unlock(&gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
}
intel_vgpu_reset_submission(vgpu, resetting_eng);
@@ -555,7 +560,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
intel_gvt_reset_vgpu_locked(vgpu, true, 0);
- mutex_unlock(&vgpu->gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 13e7b9e4a6e6..f9ce35da4123 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
- if (ppgtt->base.file != stats->file_priv)
+ if (ppgtt->vm.file != stats->file_priv)
continue;
}
@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
dpy_count, dpy_size);
seq_printf(m, "%llu [%pa] gtt total\n",
- ggtt->base.total, &ggtt->mappable_end);
+ ggtt->vm.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf)));
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct i915_request,
client_link);
rcu_read_lock();
- task = pid_task(request && request->ctx->pid ?
- request->ctx->pid : file->pid,
+ task = pid_task(request && request->gem_context->pid ?
+ request->gem_context->pid : file->pid,
PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
@@ -1162,19 +1162,28 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- pm_ier = I915_READ(GEN6_PMIER);
- pm_imr = I915_READ(GEN6_PMIMR);
- pm_isr = I915_READ(GEN6_PMISR);
- pm_iir = I915_READ(GEN6_PMIIR);
- pm_mask = I915_READ(GEN6_PMINTRMSK);
- } else {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+ /*
+ * The equivalents of the PM ISR and IIR cannot be read
+ * without affecting the current state of the system.
+ */
+ pm_isr = 0;
+ pm_iir = 0;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
pm_ier = I915_READ(GEN8_GT_IER(2));
pm_imr = I915_READ(GEN8_GT_IMR(2));
pm_isr = I915_READ(GEN8_GT_ISR(2));
pm_iir = I915_READ(GEN8_GT_IIR(2));
- pm_mask = I915_READ(GEN6_PMINTRMSK);
+ } else {
+ pm_ier = I915_READ(GEN6_PMIER);
+ pm_imr = I915_READ(GEN6_PMIMR);
+ pm_isr = I915_READ(GEN6_PMISR);
+ pm_iir = I915_READ(GEN6_PMIIR);
}
+ pm_mask = I915_READ(GEN6_PMINTRMSK);
+
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -1182,8 +1191,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
- seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
- pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ pm_ier, pm_imr, pm_mask);
+ if (INTEL_GEN(dev_priv) <= 10)
+ seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ pm_isr, pm_iir);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1205,7 +1218,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
seq_printf(m, "RP PREV UP: %d (%dus)\n",
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
+ seq_printf(m, "Up threshold: %d%%\n",
+ rps->power.up_threshold);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1213,7 +1227,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
+ seq_printf(m, "Down threshold: %d%%\n",
+ rps->power.down_threshold);
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
@@ -1346,11 +1361,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno, seqno[id],
intel_engine_last_submit(engine));
- seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
+ seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)),
- yesno(engine->hangcheck.stalled));
+ yesno(engine->hangcheck.stalled),
+ yesno(engine->hangcheck.wedged));
spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -1645,11 +1661,6 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
else
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
- if (fbc->work.scheduled)
- seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
- fbc->work.scheduled_vblank,
- drm_crtc_vblank_count(&fbc->crtc->base));
-
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
@@ -1895,7 +1906,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.format->cpp[0] * 8,
fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, fbdev_fb->obj);
+ describe_obj(m, intel_fb_obj(&fbdev_fb->base));
seq_putc(m, '\n');
}
#endif
@@ -1913,7 +1924,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.format->cpp[0] * 8,
fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
- describe_obj(m, fb->obj);
+ describe_obj(m, intel_fb_obj(&fb->base));
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
@@ -2209,6 +2220,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
+ seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
@@ -2252,13 +2264,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(rps->power));
+ rps_power_to_str(rps->power.mode));
seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
rpup && rpupei ? 100 * rpup / rpupei : 0,
- rps->up_threshold);
+ rps->power.up_threshold);
seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- rps->down_threshold);
+ rps->power.down_threshold);
} else {
seq_puts(m, "\nRPS Autotuning inactive\n");
}
@@ -2523,7 +2535,7 @@ static int i915_guc_log_level_get(void *data, u64 *val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- *val = intel_guc_log_level_get(&dev_priv->guc.log);
+ *val = intel_guc_log_get_level(&dev_priv->guc.log);
return 0;
}
@@ -2535,7 +2547,7 @@ static int i915_guc_log_level_set(void *data, u64 val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- return intel_guc_log_level_set(&dev_priv->guc.log, val);
+ return intel_guc_log_set_level(&dev_priv->guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2583,31 +2595,9 @@ static const struct file_operations i915_guc_log_relay_fops = {
.release = i915_guc_log_relay_release,
};
-static const char *psr2_live_status(u32 val)
-{
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
-
- val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
- if (val < ARRAY_SIZE(live_status))
- return live_status[val];
-
- return "unknown";
-}
-
-static const char *psr_sink_status(u8 val)
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
+ u8 val;
static const char * const sink_status[] = {
"inactive",
"transition to active, capture and display",
@@ -2616,22 +2606,94 @@ static const char *psr_sink_status(u8 val)
"transition to inactive, capture and display, timing re-sync",
"reserved",
"reserved",
- "sink internal error"
+ "sink internal error",
};
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp =
+ enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ int ret;
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- return sink_status[val];
+ if (!CAN_PSR(dev_priv)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
- return "unknown";
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+ u32 val, psr_status;
+
+ if (dev_priv->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ psr_status = I915_READ(EDP_PSR2_STATUS);
+ val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ psr_status = I915_READ(EDP_PSR_STATUS);
+ val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ }
+
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
}
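
Both branches of psr_source_status() decode a hardware status field the same way: shift and mask the register, index a string table, and fall back to "unknown" for states the table does not cover. A stand-alone sketch of that decode, with a made-up field placement:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define STATE_MASK  0x7u  /* hypothetical field placement */
#define STATE_SHIFT 0

static const char * const live_status[] = {
	"IDLE", "CAPTURE", "SLEEP", "DEEP_SLEEP",
};

static const char *decode(unsigned int reg)
{
	unsigned int state = (reg & STATE_MASK) >> STATE_SHIFT;

	/* Bounds-check before indexing; hardware may report states the
	 * table does not know about yet.
	 */
	return state < ARRAY_SIZE(live_status) ? live_status[state] : "unknown";
}

int main(void)
{
	printf("status 0x2 -> %s\n", decode(0x2));
	printf("status 0x6 -> %s\n", decode(0x6));
	return 0;
}
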
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 psrperf = 0;
- u32 stat[3];
- enum pipe pipe;
bool enabled = false;
bool sink_support;
@@ -2649,50 +2711,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
dev_priv->psr.busy_frontbuffer_bits);
- seq_printf(m, "Re-enable work scheduled: %s\n",
- yesno(work_busy(&dev_priv->psr.work.work)));
-
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_enabled)
- enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
- else
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
- } else {
- for_each_pipe(dev_priv, pipe) {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
- enum intel_display_power_domain power_domain;
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain))
- continue;
-
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
-
- intel_display_power_put(dev_priv, power_domain);
- }
- }
+ if (dev_priv->psr.psr2_enabled)
+ enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+ else
+ enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
seq_printf(m, "Main link in standby mode: %s\n",
yesno(dev_priv->psr.link_standby));
- seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
-
- if (!HAS_DDI(dev_priv))
- for_each_pipe(dev_priv, pipe) {
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- seq_printf(m, " pipe %c", pipe_name(pipe));
- }
- seq_puts(m, "\n");
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
/*
- * VLV/CHV PSR has no kind of performance counter
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2701,21 +2731,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
- if (dev_priv->psr.psr2_enabled) {
- u32 psr2 = I915_READ(EDP_PSR2_STATUS);
-
- seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
- psr2, psr2_live_status(psr2));
- }
- if (dev_priv->psr.enabled) {
- struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
- u8 val;
-
- if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
- psr_sink_status(val));
- }
+ psr_source_status(dev_priv, m);
mutex_unlock(&dev_priv->psr.lock);
if (READ_ONCE(dev_priv->psr.debug)) {
@@ -2762,86 +2779,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
i915_edp_psr_debug_get, i915_edp_psr_debug_set,
"%llu\n");
-static int i915_sink_crc(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp = NULL;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
- u8 crc[6];
-
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
-
- for_each_intel_connector_iter(connector, &conn_iter) {
- struct drm_crtc *crtc;
- struct drm_connector_state *state;
- struct intel_crtc_state *crtc_state;
-
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
- continue;
-
-retry:
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
- if (ret)
- goto err;
-
- state = connector->base.state;
- if (!state->best_encoder)
- continue;
-
- crtc = state->crtc;
- ret = drm_modeset_lock(&crtc->mutex, &ctx);
- if (ret)
- goto err;
-
- crtc_state = to_intel_crtc_state(crtc->state);
- if (!crtc_state->base.active)
- continue;
-
- /*
- * We need to wait for all crtc updates to complete, to make
- * sure any pending modesets and plane updates are completed.
- */
- if (crtc_state->base.commit) {
- ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
-
- if (ret)
- goto err;
- }
-
- intel_dp = enc_to_intel_dp(state->best_encoder);
-
- ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
- if (ret)
- goto err;
-
- seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
- crc[0], crc[1], crc[2],
- crc[3], crc[4], crc[5]);
- goto out;
-
-err:
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- goto out;
- }
- ret = -ENODEV;
-out:
- drm_connector_list_iter_end(&conn_iter);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
-
static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3398,28 +3335,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_workarounds *workarounds = &dev_priv->workarounds;
+ struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
int i;
- intel_runtime_pm_get(dev_priv);
-
- seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for (i = 0; i < workarounds->count; ++i) {
- i915_reg_t addr;
- u32 mask, value, read;
- bool ok;
-
- addr = workarounds->reg[i].addr;
- mask = workarounds->reg[i].mask;
- value = workarounds->reg[i].value;
- read = I915_READ(addr);
- ok = (value & mask) == (read & mask);
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
- i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
- }
-
- intel_runtime_pm_put(dev_priv);
+ seq_printf(m, "Workarounds applied: %d\n", wa->count);
+ for (i = 0; i < wa->count; ++i)
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+ wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
return 0;
}
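
The dropped MMIO readback used to verify each workaround as (value & mask) == (read & mask): only the bits covered by the mask matter, the rest are don't-cares. A sketch of that check in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A workaround counts as "applied" if the register bits covered by the
 * mask match the value that was programmed; bits outside the mask are
 * ignored.
 */
static bool wa_ok(uint32_t value, uint32_t mask, uint32_t read)
{
	return (value & mask) == (read & mask);
}

int main(void)
{
	printf("status: %s\n",
	       wa_ok(0x00000040, 0x000000c0, 0x00001040) ? "OK" : "FAIL");
	return 0;
}
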
@@ -4121,7 +4043,8 @@ fault_irq_set(struct drm_i915_private *i915,
err = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
- I915_WAIT_INTERRUPTIBLE);
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unlock;
@@ -4226,7 +4149,8 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (val & DROP_RETIRE)
i915_retire_requests(dev_priv);
@@ -4245,8 +4169,13 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(dev_priv);
fs_reclaim_release(GFP_KERNEL);
- if (val & DROP_IDLE)
- drain_delayed_work(&dev_priv->gt.idle_work);
+ if (val & DROP_IDLE) {
+ do {
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ flush_delayed_work(&dev_priv->gt.retire_work);
+ drain_delayed_work(&dev_priv->gt.idle_work);
+ } while (READ_ONCE(dev_priv->gt.awake));
+ }
if (val & DROP_FREED)
i915_gem_drain_freed_objects(dev_priv);
@@ -4795,7 +4724,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
- {"i915_sink_crc_eDP1", i915_sink_crc, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
@@ -4829,7 +4757,6 @@ static const struct i915_debugfs_files {
#endif
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
{"i915_next_seqno", &i915_next_seqno_fops},
- {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -4849,7 +4776,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
struct dentry *ent;
- int ret, i;
+ int i;
ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
minor->debugfs_root, to_i915(minor->dev),
@@ -4857,10 +4784,6 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
if (!ent)
return -ENOMEM;
- ret = intel_pipe_crc_create(minor);
- if (ret)
- return ret;
-
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
ent = debugfs_create_file(i915_debugfs_files[i].name,
S_IRUGO | S_IWUSR,
@@ -4982,9 +4905,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
debugfs_create_file("i915_dpcd", S_IRUGO, root,
connector, &i915_dpcd_fops);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
connector, &i915_panel_fops);
+ debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+ connector, &i915_psr_sink_status_fops);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9c449b8d8eab..f8cfd16be534 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,18 @@ bool __i915_inject_load_failure(const char *func, int line)
if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
i915_modparams.inject_load_failure, func, line);
+ i915_modparams.inject_load_failure = 0;
return true;
}
return false;
}
+
+bool i915_error_injected(void)
+{
+ return i915_load_fail_count && !i915_modparams.inject_load_failure;
+}
+
#endif
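
The new injection logic fires exactly once, at the Nth checkpoint, then zeroes the module parameter; i915_error_injected() then reduces to "a failure fired and the arm is spent". A user-space sketch of the countdown (the names mirror the driver's, but the code is purely illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Countdown-style fault injection: fail exactly one checkpoint, then
 * disarm, mirroring how the module parameter is zeroed once it fires.
 */
static unsigned int inject_load_failure = 3; /* fail the 3rd checkpoint */
static unsigned int load_fail_count;

static bool inject_failure(void)
{
	if (!inject_load_failure)
		return false;
	if (++load_fail_count == inject_load_failure) {
		inject_load_failure = 0; /* disarm after firing once */
		return true;
	}
	return false;
}

static bool error_injected(void)
{
	/* An error was injected iff a failure fired and the arm is spent. */
	return load_fail_count && !inject_load_failure;
}

int main(void)
{
	for (int i = 1; i <= 4; i++)
		printf("checkpoint %d: %s\n", i,
		       inject_failure() ? "injected failure" : "ok");
	printf("error injected: %s\n", error_injected() ? "yes" : "no");
	return 0;
}
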
#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
@@ -97,8 +104,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
vaf.fmt = fmt;
vaf.va = &args;
- dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
+ if (is_error)
+ dev_printk(level, kdev, "%pV", &vaf);
+ else
+ dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
if (is_error && !shown_bug_once) {
/*
@@ -110,25 +122,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
dev_notice(kdev, "%s", FDO_BUG_MSG);
shown_bug_once = true;
}
-
- va_end(args);
-}
-
-static bool i915_error_injected(struct drm_i915_private *dev_priv)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
- return i915_modparams.inject_load_failure &&
- i915_load_fail_count == i915_modparams.inject_load_failure;
-#else
- return false;
-#endif
}
-#define i915_load_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, \
- i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
-
/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
@@ -233,6 +228,8 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
if (id)
DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -246,14 +243,6 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
- /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
- * (which really amounts to a PCH but no South Display).
- */
- if (INTEL_INFO(dev_priv)->num_pipes == 0) {
- dev_priv->pch_type = PCH_NOP;
- return;
- }
-
/*
* The reason to probe ISA bridge instead of Dev31:Fun0 is to
* make graphics device passthrough work easy for VMM, that only
@@ -282,18 +271,28 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
pch->subsystem_device)) {
id = intel_virt_detect_pch(dev_priv);
- if (id) {
- pch_type = intel_pch_type(dev_priv, id);
- if (WARN_ON(pch_type == PCH_NONE))
- pch_type = PCH_NOP;
- } else {
- pch_type = PCH_NOP;
- }
+ pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (WARN_ON(id && pch_type == PCH_NONE))
+ id = 0;
+
dev_priv->pch_type = pch_type;
dev_priv->pch_id = id;
break;
}
}
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+ DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+ dev_priv->pch_type = PCH_NOP;
+ dev_priv->pch_id = 0;
+ }
+
if (!pch)
DRM_DEBUG_KMS("No PCH found.\n");
@@ -634,26 +633,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static void i915_gem_fini(struct drm_i915_private *dev_priv)
-{
- /* Flush any outstanding unpin_work. */
- i915_gem_drain_workqueue(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uc_fini_hw(dev_priv);
- intel_uc_fini(dev_priv);
- i915_gem_cleanup_engines(dev_priv);
- i915_gem_contexts_fini(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_uc_fini_misc(dev_priv);
- i915_gem_cleanup_userptr(dev_priv);
-
- i915_gem_drain_freed_objects(dev_priv);
-
- WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -703,7 +682,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init(dev_priv);
if (ret)
- goto cleanup_irq;
+ goto cleanup_modeset;
intel_setup_overlay(dev_priv);
@@ -723,6 +702,8 @@ cleanup_gem:
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
+cleanup_modeset:
+ intel_modeset_cleanup(dev);
cleanup_irq:
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev_priv);
@@ -919,7 +900,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->uncore.lock);
mutex_init(&dev_priv->sb_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
@@ -1173,8 +1153,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
- intel_opregion_setup(dev_priv);
-
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1189,6 +1167,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* get lost on g4x as well, and interrupt delivery seems to stay
* properly dead afterwards. So we'll just disable them for all
* pre-gen5 chipsets.
+ *
+ * dp aux and gmbus irqs on gen4 seem to be able to generate legacy
+ * interrupts even when in MSI mode. This results in spurious
+ * interrupt warnings if the legacy irq no. is shared with another
+ * device. The kernel then disables that interrupt source and so
+ * prevents the other device from working properly.
*/
if (INTEL_GEN(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
@@ -1197,10 +1181,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = intel_gvt_init(dev_priv);
if (ret)
- goto err_ggtt;
+ goto err_msi;
+
+ intel_opregion_setup(dev_priv);
return 0;
+err_msi:
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
i915_ggtt_cleanup_hw(dev_priv);
err_perf:
@@ -1433,6 +1423,7 @@ out_fini:
drm_dev_fini(&dev_priv->drm);
out_free:
kfree(dev_priv);
+ pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -1553,17 +1544,30 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
return false;
}
+static int i915_drm_prepare(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ int err;
+
+ /*
+ * NB intel_display_suspend() may issue new requests after we've
+ * ostensibly marked the GPU as ready-to-sleep here. We need to
+ * split out that work and pull it forward so that after point,
+ * the GPU is not woken again.
+ */
+ err = i915_gem_suspend(i915);
+ if (err)
+ dev_err(&i915->drm.pdev->dev,
+ "GEM idle failed, suspend/resume might fail\n");
+
+ return err;
+}
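
i915_drm_prepare() moves GEM idling into the PM .prepare phase, ahead of .suspend, precisely because intel_display_suspend() may itself issue requests. A hypothetical sketch of the two-phase ordering with plain callbacks:

#include <stdio.h>

/* Illustrative only: the GPU is idled in .prepare, before .suspend
 * touches the display and possibly generates new work of its own.
 */
struct pm_ops {
	int (*prepare)(void);
	int (*suspend)(void);
};

static int gem_idle(void)        { puts("prepare: idle the GPU"); return 0; }
static int display_suspend(void) { puts("suspend: quiesce display"); return 0; }

static const struct pm_ops ops = {
	.prepare = gem_idle,
	.suspend = display_suspend,
};

int main(void)
{
	if (ops.prepare())
		return 1;
	return ops.suspend();
}
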
+
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
pci_power_t opregion_target_state;
- int error;
-
- /* ignore lid events during suspend */
- mutex_lock(&dev_priv->modeset_restore_lock);
- dev_priv->modeset_restore = MODESET_SUSPENDED;
- mutex_unlock(&dev_priv->modeset_restore_lock);
disable_rpm_wakeref_asserts(dev_priv);
@@ -1575,16 +1579,9 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(pdev);
- error = i915_gem_suspend(dev_priv);
- if (error) {
- dev_err(&pdev->dev,
- "GEM idle failed, resume might fail\n");
- goto out;
- }
-
intel_display_suspend(dev);
- intel_dp_mst_suspend(dev);
+ intel_dp_mst_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
@@ -1600,7 +1597,6 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_suspend(dev_priv);
intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1609,10 +1605,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_csr_ucode_suspend(dev_priv);
-out:
enable_rpm_wakeref_asserts(dev_priv);
- return error;
+ return 0;
}
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
@@ -1623,7 +1618,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
+ i915_gem_suspend_late(dev_priv);
+
intel_display_set_init_power(dev_priv, false);
+ intel_uncore_suspend(dev_priv);
/*
* In case of firmware assisted context save/restore don't manually
@@ -1710,6 +1708,8 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(dev_priv);
intel_sanitize_gt_powersave(dev_priv);
+ i915_gem_sanitize(dev_priv);
+
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
@@ -1746,7 +1746,7 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_dp_mst_resume(dev);
+ intel_dp_mst_resume(dev_priv);
intel_display_resume(dev);
@@ -1764,10 +1764,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- mutex_lock(&dev_priv->modeset_restore_lock);
- dev_priv->modeset_restore = MODESET_DONE;
- mutex_unlock(&dev_priv->modeset_restore_lock);
-
intel_opregion_notify_adapter(dev_priv, PCI_D0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1851,7 +1847,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
else
intel_display_set_init_power(dev_priv, true);
- i915_gem_sanitize(dev_priv);
+ intel_engines_sanitize(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2081,6 +2077,22 @@ out:
return ret;
}
+static int i915_pm_prepare(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ if (!dev) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_prepare(dev);
+}
+
static int i915_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2731,6 +2743,7 @@ const struct dev_pm_ops i915_pm_ops = {
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
*/
+ .prepare = i915_pm_prepare,
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 71e1aa54f774..4aca5344863d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -40,6 +40,7 @@
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
@@ -85,8 +86,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180514"
-#define DRIVER_TIMESTAMP 1526300884
+#define DRIVER_DATE "20180719"
+#define DRIVER_TIMESTAMP 1532015279
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -107,13 +108,24 @@
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
__i915_inject_load_failure(__func__, __LINE__)
+
+bool i915_error_injected(void);
+
#else
+
#define i915_inject_load_failure() false
+#define i915_error_injected() false
+
#endif
+#define i915_load_error(i915, fmt, ...) \
+ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
+
typedef struct {
uint32_t val;
} uint_fixed_16_16_t;
@@ -287,7 +299,6 @@ struct i915_hotplug {
u32 event_bits;
struct delayed_work reenable_work;
- struct intel_digital_port *irq_port[I915_MAX_PORTS];
u32 long_port_mask;
u32 short_port_mask;
struct work_struct dig_port_work;
@@ -500,6 +511,7 @@ struct intel_fbc {
bool enabled;
bool active;
+ bool flip_pending;
bool underrun_detected;
struct work_struct underrun_work;
@@ -567,12 +579,6 @@ struct intel_fbc {
unsigned int gen9_wa_cfb_stride;
} params;
- struct intel_fbc_work {
- bool scheduled;
- u64 scheduled_vblank;
- struct work_struct work;
- } work;
-
const char *no_fbc_reason;
};
@@ -608,26 +614,17 @@ struct i915_psr {
bool sink_support;
struct intel_dp *enabled;
bool active;
- struct delayed_work work;
+ struct work_struct work;
unsigned busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
bool alpm;
- bool has_hw_tracking;
bool psr2_enabled;
u8 sink_sync_latency;
bool debug;
ktime_t last_entry_attempt;
ktime_t last_exit;
-
- void (*enable_source)(struct intel_dp *,
- const struct intel_crtc_state *);
- void (*disable_source)(struct intel_dp *,
- const struct intel_crtc_state *);
- void (*enable_sink)(struct intel_dp *);
- void (*activate)(struct intel_dp *);
- void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};
enum intel_pch {
@@ -639,7 +636,7 @@ enum intel_pch {
PCH_KBP, /* Kaby Lake PCH */
PCH_CNP, /* Cannon Lake PCH */
PCH_ICP, /* Ice Lake PCH */
- PCH_NOP,
+ PCH_NOP, /* PCH without south display */
};
enum intel_sbi_destination {
@@ -782,11 +779,17 @@ struct intel_rps {
u8 rp0_freq; /* Non-overclocked max frequency. */
u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
- u8 up_threshold; /* Current %busy required to uplock */
- u8 down_threshold; /* Current %busy required to downclock */
-
int last_adj;
- enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+ struct {
+ struct mutex mutex;
+
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+ unsigned int interactive;
+
+ u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+ } power;
bool enabled;
atomic_t num_waiters;
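
The new rps->power block pairs a mutex-guarded mode with an interactive count; intel_rps_mark_interactive() (declared further down in this header) effectively reference-counts interactive users. A toy sketch of that counted toggle:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t power_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int interactive; /* how many interactive users exist */

/* Reference-count style toggle: the high-power mode stays selected
 * while at least one caller has marked itself interactive. Callers are
 * assumed to balance their on/off marks.
 */
static void mark_interactive(bool on)
{
	pthread_mutex_lock(&power_mutex);
	if (on)
		interactive++;
	else
		interactive--;
	printf("interactive users: %u\n", interactive);
	pthread_mutex_unlock(&power_mutex);
}

int main(void)
{
	mark_interactive(true);
	mark_interactive(true);
	mark_interactive(false);
	mark_interactive(false);
	return 0;
}
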
@@ -955,7 +958,7 @@ struct i915_gem_mm {
/**
* Small stash of WC pages
*/
- struct pagevec wc_stash;
+ struct pagestash wc_stash;
/**
* tmpfs instance used for shmem backed objects
@@ -1003,16 +1006,13 @@ struct i915_gem_mm {
#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
-enum modeset_restore {
- MODESET_ON_LID_OPEN,
- MODESET_DONE,
- MODESET_SUSPENDED,
-};
+#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
#define DDC_PIN_B 0x05
@@ -1057,9 +1057,9 @@ struct intel_vbt_data {
/* Feature bits */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
unsigned int panel_type:4;
@@ -1075,7 +1075,6 @@ struct intel_vbt_data {
int vswing;
bool low_vswing;
bool initialized;
- bool support;
int bpp;
struct edp_power_seq pps;
} edp;
@@ -1086,8 +1085,8 @@ struct intel_vbt_data {
bool require_aux_wakeup;
int idle_frames;
enum psr_lines_to_wait lines_to_wait;
- int tp1_wakeup_time;
- int tp2_tp3_wakeup_time;
+ int tp1_wakeup_time_us;
+ int tp2_tp3_wakeup_time_us;
} psr;
struct {
@@ -1272,20 +1271,11 @@ enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_MAX,
};
-struct intel_pipe_crc_entry {
- uint32_t frame;
- uint32_t crc[5];
-};
-
#define INTEL_PIPE_CRC_ENTRIES_NR 128
struct intel_pipe_crc {
spinlock_t lock;
- bool opened; /* exclusive access to the result file */
- struct intel_pipe_crc_entry *entries;
- enum intel_pipe_crc_source source;
- int head, tail;
- wait_queue_head_t wq;
int skipped;
+ enum intel_pipe_crc_source source;
};
struct i915_frontbuffer_tracking {
@@ -1300,7 +1290,7 @@ struct i915_frontbuffer_tracking {
};
struct i915_wa_reg {
- i915_reg_t addr;
+ u32 addr;
u32 value;
/* bitmask representing WA bits */
u32 mask;
@@ -1740,12 +1730,9 @@ struct drm_i915_private {
unsigned long quirks;
- enum modeset_restore modeset_restore;
- struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;
- struct list_head vm_list; /* Global list of all address spaces */
struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_gem_mm mm;
@@ -1851,6 +1838,7 @@ struct drm_i915_private {
*/
struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
} contexts;
@@ -1958,7 +1946,9 @@ struct drm_i915_private {
*/
struct i915_perf_stream *exclusive_stream;
+ struct intel_context *pinned_ctx;
u32 specific_ctx_id;
+ u32 specific_ctx_id_mask;
struct hrtimer poll_check_timer;
wait_queue_head_t poll_wq;
@@ -2311,6 +2301,7 @@ intel_info(const struct drm_i915_private *dev_priv)
}
#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
@@ -2563,17 +2554,10 @@ intel_info(const struct drm_i915_private *dev_priv)
(IS_CANNONLAKE(dev_priv) || \
IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
-/*
- * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
- * even when in MSI mode. This results in spurious interrupt warnings if the
- * legacy irq no. is shared with another device. The kernel then disables that
- * interrupt source and so prevents the other device from working properly.
- *
- * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
- * interrupts.
- */
-#define HAS_AUX_IRQ(dev_priv) true
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
+#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
+ IS_GEMINILAKE(dev_priv) || \
+ IS_KABYLAKE(dev_priv))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -2748,14 +2732,14 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
- enum hpd_pin pin);
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
@@ -3102,9 +3086,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
}
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -3169,12 +3150,14 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags);
+ unsigned int flags, long timeout);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_fault *vmf);
+vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
@@ -3213,7 +3196,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
- return container_of(vm, struct i915_hw_ppgtt, base);
+ return container_of(vm, struct i915_hw_ppgtt, vm);
}
/* i915_gem_fence_reg.c */
@@ -3320,7 +3303,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3445,6 +3428,8 @@ extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
+ bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
@@ -3678,14 +3663,6 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
-static inline unsigned long
-timespec_to_jiffies_timeout(const struct timespec *value)
-{
- unsigned long j = timespec_to_jiffies(value);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
/*
* If you need to wait X milliseconds between events A and B, but event B
* doesn't happen exactly after event A, you record the timestamp (jiffies) of
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 17c5097721e8..fcc73a6ab503 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+ return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -139,6 +139,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
@@ -181,6 +183,8 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
void i915_gem_park(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(i915->gt.active_requests);
@@ -193,6 +197,8 @@ void i915_gem_park(struct drm_i915_private *i915)
void i915_gem_unpark(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->gt.active_requests);
@@ -243,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
- pinned = ggtt->base.reserved;
+ pinned = ggtt->vm.reserved;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
- list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = ggtt->base.total;
+ args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -796,7 +802,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
* that was!).
*/
- wmb();
+ i915_gem_chipset_flush(dev_priv);
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
@@ -831,6 +837,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
}
break;
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
case I915_GEM_DOMAIN_CPU:
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
break;
@@ -1217,9 +1227,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
@@ -1240,8 +1250,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1420,9 +1429,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
} else {
page_base += offset & PAGE_MASK;
@@ -1449,8 +1458,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
out_unpin:
if (node.allocated) {
wmb();
- ggtt->base.clear_range(&ggtt->base,
- node.start, node.size);
+ ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
@@ -1619,6 +1627,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto err;
}
+ /* Writes not allowed into this read-only object */
+ if (i915_gem_object_is_readonly(obj)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
ret = -ENODEV;
@@ -1991,9 +2005,9 @@ compute_partial_view(struct drm_i915_gem_object *obj,
* The current feature set supported by i915_gem_fault() and thus GTT mmaps
* is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
*/
-int i915_gem_fault(struct vm_fault *vmf)
+vm_fault_t i915_gem_fault(struct vm_fault *vmf)
{
-#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
@@ -2004,6 +2018,10 @@ int i915_gem_fault(struct vm_fault *vmf)
pgoff_t page_offset;
int ret;
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write)
+ return VM_FAULT_SIGBUS;
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
@@ -2114,10 +2132,9 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = VM_FAULT_SIGBUS;
- break;
- }
+ if (!i915_terminally_wedged(&dev_priv->gpu_error))
+ return VM_FAULT_SIGBUS;
+ /* else: fall through */
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
@@ -2132,21 +2149,16 @@ err:
* EBUSY is ok: this just means that another thread
* already did the job.
*/
- ret = VM_FAULT_NOPAGE;
- break;
+ return VM_FAULT_NOPAGE;
case -ENOMEM:
- ret = VM_FAULT_OOM;
- break;
+ return VM_FAULT_OOM;
case -ENOSPC:
case -EFAULT:
- ret = VM_FAULT_SIGBUS;
- break;
+ return VM_FAULT_SIGBUS;
default:
WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
- ret = VM_FAULT_SIGBUS;
- break;
+ return VM_FAULT_SIGBUS;
}
- return ret;
}
static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
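With the vm_fault_t conversion above, i915_gem_fault() returns the fault code directly from each arm of its error switch rather than accumulating it in ret and breaking out. A condensed, hypothetical sketch of the errno translation being performed; the real handler additionally checks i915_terminally_wedged() before deciding how to map -EIO:

    static vm_fault_t example_errno_to_fault(int err)
    {
            switch (err) {
            case 0:
            case -EAGAIN:           /* GPU hung; the fault will be retried */
            case -ERESTARTSYS:
            case -EINTR:
            case -EBUSY:            /* another thread already did the job */
                    return VM_FAULT_NOPAGE;
            case -ENOMEM:
                    return VM_FAULT_OOM;
            case -ENOSPC:
            case -EFAULT:
            default:
                    return VM_FAULT_SIGBUS;
            }
    }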
@@ -2265,7 +2277,9 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
/* Attempt to reap some mmap space from dead objects */
do {
- err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+ err = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
break;
@@ -2410,29 +2424,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_unlock();
}
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
- if (i915_gem_object_has_pinned_pages(obj))
- return;
-
- GEM_BUG_ON(obj->bind_count);
- if (!i915_gem_object_has_pages(obj))
- return;
-
- /* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
- goto unlock;
-
- /* ->put_pages might need to allocate memory for the bit17 swizzle
- * array, hence protect them from being reaped by removing them from gtt
- * lists early. */
pages = fetch_and_zero(&obj->mm.pages);
- GEM_BUG_ON(!pages);
+ if (!pages)
+ return NULL;
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
@@ -2451,12 +2451,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
}
__i915_gem_object_reset_page_iter(obj);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+ return pages;
+}
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+ if (i915_gem_object_has_pinned_pages(obj))
+ return;
+
+ GEM_BUG_ON(obj->bind_count);
+ if (!i915_gem_object_has_pages(obj))
+ return;
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
+
+ /*
+ * ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early.
+ */
+ pages = __i915_gem_object_unset_pages(obj);
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
- obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
unlock:
mutex_unlock(&obj->mm.lock);
}
@@ -2974,16 +2999,16 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
- DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
- ctx->name, atomic_read(&ctx->guilty_count),
- score, yesno(banned && bannable));
-
/* Cool contexts don't accumulate client ban score */
if (!bannable)
return;
- if (banned)
+ if (banned) {
+ DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count),
+ score);
i915_gem_context_set_banned(ctx);
+ }
if (!IS_ERR_OR_NULL(ctx->file_priv))
i915_gem_client_mark_guilty(ctx->file_priv, ctx);
@@ -3031,7 +3056,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
- struct i915_request *request = NULL;
+ struct i915_request *request;
/*
* During the reset sequence, we must prevent the engine from
@@ -3042,52 +3067,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
*/
intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
- /*
- * Prevent the signaler thread from updating the request
- * state (by calling dma_fence_signal) as we are processing
- * the reset. The write from the GPU of the seqno is
- * asynchronous and the signaler thread may see a different
- * value to us and declare the request complete, even though
- * the reset routine have picked that request as the active
- * (incomplete) request. This conflict is not handled
- * gracefully!
- */
- kthread_park(engine->breadcrumbs.signaler);
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- *
- * Note that this needs to be a single atomic operation on the
- * tasklet (flush existing tasks, prevent new tasks) to prevent
- * a race between reset and set-wedged. It is not, so we do the best
- * we can atm and make sure we don't lock the machine up in the more
- * common case of recursively being called from set-wedged from inside
- * i915_reset.
- */
- if (!atomic_read(&engine->execlists.tasklet.count))
- tasklet_kill(&engine->execlists.tasklet);
- tasklet_disable(&engine->execlists.tasklet);
-
- /*
- * We're using worker to queue preemption requests from the tasklet in
- * GuC submission mode.
- * Even though tasklet was disabled, we may still have a worker queued.
- * Let's make sure that all workers scheduled before disabling the
- * tasklet are completed before continuing with the reset.
- */
- if (engine->i915->guc.preempt_wq)
- flush_workqueue(engine->i915->guc.preempt_wq);
-
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- request = i915_gem_find_active_request(engine);
+ request = engine->reset.prepare(engine);
if (request && request->fence.error == -EIO)
request = ERR_PTR(-EIO); /* Previous reset failed! */
@@ -3117,43 +3097,24 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
return err;
}
-static void skip_request(struct i915_request *request)
-{
- void *vaddr = request->ring->vaddr;
- u32 head;
-
- /* As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- head = request->head;
- if (request->postfix < head) {
- memset(vaddr + head, 0, request->ring->size - head);
- head = 0;
- }
- memset(vaddr + head, 0, request->postfix - head);
-
- dma_fence_set_error(&request->fence, -EIO);
-}
-
static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct i915_gem_context *hung_ctx = request->ctx;
+ struct i915_gem_context *hung_ctx = request->gem_context;
struct i915_timeline *timeline = request->timeline;
unsigned long flags;
GEM_BUG_ON(timeline == &engine->timeline);
spin_lock_irqsave(&engine->timeline.lock, flags);
- spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&timeline->lock);
list_for_each_entry_continue(request, &engine->timeline.requests, link)
- if (request->ctx == hung_ctx)
- skip_request(request);
+ if (request->gem_context == hung_ctx)
+ i915_request_skip(request, -EIO);
list_for_each_entry(request, &timeline->requests, link)
- skip_request(request);
+ i915_request_skip(request, -EIO);
spin_unlock(&timeline->lock);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -3195,11 +3156,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
}
if (stalled) {
- i915_gem_context_mark_guilty(request->ctx);
- skip_request(request);
+ i915_gem_context_mark_guilty(request->gem_context);
+ i915_request_skip(request, -EIO);
/* If this context is now banned, skip all pending requests. */
- if (i915_gem_context_is_banned(request->ctx))
+ if (i915_gem_context_is_banned(request->gem_context))
engine_skip_context(request);
} else {
/*
@@ -3209,15 +3170,17 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
*/
request = i915_gem_find_active_request(engine);
if (request) {
- i915_gem_context_mark_innocent(request->ctx);
+ unsigned long flags;
+
+ i915_gem_context_mark_innocent(request->gem_context);
dma_fence_set_error(&request->fence, -EAGAIN);
/* Rewind the engine to replay the incomplete rq */
- spin_lock_irq(&engine->timeline.lock);
+ spin_lock_irqsave(&engine->timeline.lock, flags);
request = list_prev_entry(request, link);
if (&request->link == &engine->timeline.requests)
request = NULL;
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
}
@@ -3238,13 +3201,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
if (request)
request = i915_gem_reset_request(engine, request, stalled);
- if (request) {
- DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->global_seqno);
- }
-
/* Setup the CS to resume from the breadcrumb of the hung request */
- engine->reset_hw(engine, request);
+ engine->reset.reset(engine, request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv,
@@ -3258,14 +3216,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct i915_gem_context *ctx;
+ struct intel_context *ce;
i915_gem_reset_engine(engine,
engine->hangcheck.active_request,
stalled_mask & ENGINE_MASK(id));
- ctx = fetch_and_zero(&engine->last_retired_context);
- if (ctx)
- intel_context_unpin(ctx, engine);
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
/*
* Ostensibly, we always want a context loaded for powersaving,
@@ -3283,7 +3241,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
rq = i915_request_alloc(engine,
dev_priv->kernel_context);
if (!IS_ERR(rq))
- __i915_request_add(rq, false);
+ i915_request_add(rq);
}
}
@@ -3292,8 +3250,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->execlists.tasklet);
- kthread_unpark(engine->breadcrumbs.signaler);
+ engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}
@@ -3571,6 +3528,22 @@ new_requests_since_last_retire(const struct drm_i915_private *i915)
work_pending(&i915->gt.idle_work.work));
}
+static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return;
+
+ GEM_BUG_ON(i915->gt.active_requests);
+ for_each_engine(engine, i915, id) {
+ GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
+ GEM_BUG_ON(engine->last_retired_context !=
+ to_intel_context(i915->kernel_context, engine));
+ }
+}
+
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
@@ -3582,6 +3555,24 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ return;
+
+ /*
+ * Flush out the last user context, leaving only the pinned
+ * kernel context resident. When we are idling on the kernel_context,
+ * no more new requests (with a context switch) are emitted and we
+ * can finally rest. A consequence is that the idle work handler is
+ * always called at least twice before idling (and if the system is
+ * idle that implies a round trip through the retire worker).
+ */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_switch_to_kernel_context(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
+ READ_ONCE(dev_priv->gt.active_requests));
+
/*
* Wait for last execlists context complete, but bail out in case a
* new request is submitted. As we don't trust the hardware, we
@@ -3615,6 +3606,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
epoch = __i915_gem_park(dev_priv);
+ assert_kernel_context_is_current(dev_priv);
+
rearm_hangcheck = false;
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3761,9 +3754,31 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return ret;
}
-static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
+static long wait_for_timeline(struct i915_timeline *tl,
+ unsigned int flags, long timeout)
{
- return i915_gem_active_wait(&tl->last_request, flags);
+ struct i915_request *rq;
+
+ rq = i915_gem_active_get_unlocked(&tl->last_request);
+ if (!rq)
+ return timeout;
+
+ /*
+ * "Race-to-idle".
+ *
+ * Switching to the kernel context is often used as a synchronous
+ * step prior to idling, e.g. in suspend for flushing all
+ * current operations to memory before sleeping. These we
+ * want to complete as quickly as possible to avoid prolonged
+ * stalls, so allow the gpu to boost to maximum clocks.
+ */
+ if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ gen6_rps_boost(rq, NULL);
+
+ timeout = i915_request_wait(rq, flags, timeout);
+ i915_request_put(rq);
+
+ return timeout;
}
static int wait_for_engines(struct drm_i915_private *i915)
@@ -3779,8 +3794,13 @@ static int wait_for_engines(struct drm_i915_private *i915)
return 0;
}
-int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915,
+ unsigned int flags, long timeout)
{
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
+ flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
+
/* If the device is asleep, we have no requests outstanding */
if (!READ_ONCE(i915->gt.awake))
return 0;
@@ -3792,26 +3812,31 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
lockdep_assert_held(&i915->drm.struct_mutex);
list_for_each_entry(tl, &i915->gt.timelines, link) {
- err = wait_for_timeline(tl, flags);
- if (err)
- return err;
+ timeout = wait_for_timeline(tl, flags, timeout);
+ if (timeout < 0)
+ return timeout;
}
- i915_retire_requests(i915);
- return wait_for_engines(i915);
+ err = wait_for_engines(i915);
+ if (err)
+ return err;
+
+ i915_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
} else {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err;
for_each_engine(engine, i915, id) {
- err = wait_for_timeline(&engine->timeline, flags);
- if (err)
- return err;
- }
+ struct i915_timeline *tl = &engine->timeline;
- return 0;
+ timeout = wait_for_timeline(tl, flags, timeout);
+ if (timeout < 0)
+ return timeout;
+ }
}
+
+ return 0;
}
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
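The reworked i915_gem_wait_for_idle() threads a single long timeout through every timeline: each call to wait_for_timeline() consumes part of the budget and returns the remainder, or a negative errno, so N timelines share one overall deadline instead of each waiting unbounded. A sketch of the idiom, where wait_step() and NUM_STEPS are hypothetical stand-ins and each step behaves like i915_request_wait() (jiffies remaining on success, a negative errno such as -ETIME on failure):

    static long example_wait_all(long timeout)
    {
            int i;

            for (i = 0; i < NUM_STEPS; i++) {
                    timeout = wait_step(i, timeout); /* consumes the budget */
                    if (timeout < 0)
                            return timeout;          /* propagate the errno */
            }

            return 0; /* everything idled within the one deadline */
    }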
@@ -4385,7 +4410,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct i915_vma *vma;
int ret;
@@ -4973,25 +4998,25 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+void i915_gem_sanitize(struct drm_i915_private *i915)
{
- struct i915_gem_context *kernel_context = i915->kernel_context;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ int err;
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
- GEM_BUG_ON(engine->last_retired_context != kernel_context);
- }
-}
+ GEM_TRACE("\n");
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
- if (i915_terminally_wedged(&i915->gpu_error)) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_get(i915);
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+ * in and so worth trying to use the device once more.
+ */
+ if (i915_terminally_wedged(&i915->gpu_error))
i915_gem_unset_wedged(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
/*
* If we inherit context state from the BIOS or earlier occupants
@@ -5001,60 +5026,94 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
+ err = -ENODEV;
if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
- WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+ err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+ if (!err)
+ intel_engines_sanitize(i915);
+
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_runtime_pm_put(i915);
+
+ i915_gem_contexts_lost(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
}
-int i915_gem_suspend(struct drm_i915_private *dev_priv)
+int i915_gem_suspend(struct drm_i915_private *i915)
{
- struct drm_device *dev = &dev_priv->drm;
int ret;
- intel_runtime_pm_get(dev_priv);
- intel_suspend_gt_powersave(dev_priv);
+ GEM_TRACE("\n");
- mutex_lock(&dev->struct_mutex);
+ intel_runtime_pm_get(i915);
+ intel_suspend_gt_powersave(i915);
- /* We have to flush all the executing contexts to main memory so
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /*
+ * We have to flush all the executing contexts to main memory so
* that they can be saved in the hibernation image. To ensure the last
* context image is coherent, we have to switch away from it. That
- * leaves the dev_priv->kernel_context still active when
+ * leaves the i915->kernel_context still active when
* we actually suspend, and its image in memory may not match the GPU
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = i915_gem_switch_to_kernel_context(dev_priv);
+ if (!i915_terminally_wedged(&i915->gpu_error)) {
+ ret = i915_gem_switch_to_kernel_context(i915);
if (ret)
goto err_unlock;
- ret = i915_gem_wait_for_idle(dev_priv,
+ ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ MAX_SCHEDULE_TIMEOUT);
if (ret && ret != -EIO)
goto err_unlock;
- assert_kernel_context_is_current(dev_priv);
+ assert_kernel_context_is_current(i915);
}
- i915_gem_contexts_lost(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ i915_retire_requests(i915); /* ensure we flush after wedging */
+
+ mutex_unlock(&i915->drm.struct_mutex);
- intel_uc_suspend(dev_priv);
+ intel_uc_suspend(i915);
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&i915->gt.retire_work);
- /* As the idle_work is rearming if it detects a race, play safe and
+ /*
+ * As the idle_work is rearming if it detects a race, play safe and
* repeat the flush until it is definitely idle.
*/
- drain_delayed_work(&dev_priv->gt.idle_work);
+ drain_delayed_work(&i915->gt.idle_work);
- /* Assert that we sucessfully flushed all the work and
+ /*
+ * Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- WARN_ON(dev_priv->gt.awake);
- if (WARN_ON(!intel_engines_are_idle(dev_priv)))
- i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
+ WARN_ON(i915->gt.awake);
+ if (WARN_ON(!intel_engines_are_idle(i915)))
+ i915_gem_set_wedged(i915); /* no hope, discard everything */
+
+ intel_runtime_pm_put(i915);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ intel_runtime_pm_put(i915);
+ return ret;
+}
+
+void i915_gem_suspend_late(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct list_head *phases[] = {
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
+ NULL
+ }, **phase;
/*
* Neither the BIOS, ourselves or any other kernel
@@ -5075,20 +5134,22 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
- intel_uc_sanitize(dev_priv);
- i915_gem_sanitize(dev_priv);
- intel_runtime_pm_put(dev_priv);
- return 0;
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
- return ret;
+ intel_uc_sanitize(i915);
+ i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
+ GEM_TRACE("\n");
+
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
@@ -5262,8 +5323,18 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
/* Only when the HW is re-initialised, can we replay the requests */
ret = __i915_gem_restart_engines(dev_priv);
+ if (ret)
+ goto cleanup_uc;
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ return 0;
+
+cleanup_uc:
+ intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
return ret;
}
@@ -5300,7 +5371,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (engine->init_context)
err = engine->init_context(rq);
- __i915_request_add(rq, true);
+ i915_request_add(rq);
if (err)
goto err_active;
}
@@ -5309,9 +5380,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err)
goto err_active;
- err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
- if (err)
+ if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
+ i915_gem_set_wedged(i915);
+ err = -EIO; /* Caller will declare us wedged */
goto err_active;
+ }
assert_kernel_context_is_current(i915);
@@ -5374,7 +5447,9 @@ err_active:
if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
goto out_ctx;
- if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
+ if (WARN_ON(i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT)))
goto out_ctx;
i915_gem_contexts_lost(i915);
@@ -5385,12 +5460,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
- /*
- * We need to fallback to 4K pages since gvt gtt handling doesn't
- * support huge page entries - we will need to check either hypervisor
- * mm can support huge guest page or just do emulation in gvt.
- */
- if (intel_vgpu_active(dev_priv))
+ /* We need to fall back to 4K pages if the host doesn't support huge gtt. */
+ if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
@@ -5408,13 +5479,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- ret = intel_wopcm_init(&dev_priv->wopcm);
+ ret = intel_uc_init_misc(dev_priv);
if (ret)
return ret;
- ret = intel_uc_init_misc(dev_priv);
+ ret = intel_wopcm_init(&dev_priv->wopcm);
if (ret)
- return ret;
+ goto err_uc_misc;
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -5490,8 +5561,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* driver doesn't explode during runtime.
*/
err_init_hw:
- i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
- i915_gem_contexts_lost(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ WARN_ON(i915_gem_suspend(dev_priv));
+ i915_gem_suspend_late(dev_priv);
+
+ i915_gem_drain_workqueue(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
err_uc_init:
intel_uc_fini(dev_priv);
@@ -5508,6 +5585,7 @@ err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
+err_uc_misc:
intel_uc_fini_misc(dev_priv);
if (ret != -EIO)
@@ -5520,7 +5598,8 @@ err_unlock:
* for all other failure, such as an allocation failure, bail.
*/
if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
- DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+ i915_load_error(dev_priv,
+ "Failed to initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(dev_priv);
}
ret = 0;
@@ -5530,6 +5609,28 @@ err_unlock:
return ret;
}
+void i915_gem_fini(struct drm_i915_private *dev_priv)
+{
+ i915_gem_suspend_late(dev_priv);
+
+ /* Flush any outstanding unpin_work. */
+ i915_gem_drain_workqueue(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_hw(dev_priv);
+ intel_uc_fini(dev_priv);
+ i915_gem_cleanup_engines(dev_priv);
+ i915_gem_contexts_fini(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_uc_fini_misc(dev_priv);
+ i915_gem_cleanup_userptr(dev_priv);
+
+ i915_gem_drain_freed_objects(dev_priv);
+
+ WARN_ON(!list_empty(&dev_priv->contexts.list));
+}
+
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
i915_gem_sanitize(i915);
@@ -5694,16 +5795,17 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
return 0;
}
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+int i915_gem_freeze_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
struct list_head *phases[] = {
- &dev_priv->mm.unbound_list,
- &dev_priv->mm.bound_list,
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
NULL
- }, **p;
+ }, **phase;
- /* Called just before we write the hibernation image.
+ /*
+ * Called just before we write the hibernation image.
*
* We need to update the domain tracking to reflect that the CPU
* will be accessing all the pages to create and restore from the
@@ -5717,15 +5819,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
* the objects as well, see i915_gem_freeze()
*/
- i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
- i915_gem_drain_freed_objects(dev_priv);
+ i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
+ i915_gem_drain_freed_objects(i915);
- spin_lock(&dev_priv->mm.obj_lock);
- for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, mm.link)
- __start_cpu_write(obj);
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
}
- spin_unlock(&dev_priv->mm.obj_lock);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
@@ -6045,16 +6147,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- pages = fetch_and_zero(&obj->mm.pages);
- if (pages) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- __i915_gem_object_reset_page_iter(obj);
-
- spin_lock(&i915->mm.obj_lock);
- list_del(&obj->mm.link);
- spin_unlock(&i915->mm.obj_lock);
- }
+ pages = __i915_gem_object_unset_pages(obj);
obj->ops = &i915_gem_phys_ops;
@@ -6072,7 +6165,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
err_xfer:
obj->ops = &i915_gem_object_ops;
- obj->mm.pages = pages;
+ if (!IS_ERR_OR_NULL(pages)) {
+ unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+ }
err_unlock:
mutex_unlock(&obj->mm.lock);
return err;
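Both the new i915_gem_suspend_late() and the reworked i915_gem_freeze_late() above walk the shmem object lists with the same idiom: a NULL-terminated array of list heads processed phase by phase under struct_mutex. A stripped-down sketch, with example_flush() a hypothetical stand-in for the per-object domain change:

    static void example_walk_objects(struct drm_i915_private *i915)
    {
            struct drm_i915_gem_object *obj;
            struct list_head *phases[] = {
                    &i915->mm.unbound_list,
                    &i915->mm.bound_list,
                    NULL
            }, **phase;

            for (phase = phases; *phase; phase++)
                    list_for_each_entry(obj, *phase, mm.link)
                            example_flush(obj); /* hypothetical hook */
    }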
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 525920404ede..e46592956872 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,6 +26,7 @@
#define __I915_GEM_H__
#include <linux/bug.h>
+#include <linux/interrupt.h>
struct drm_i915_private;
@@ -62,9 +63,12 @@ struct drm_i915_private;
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP_ON(expr) \
+ do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
+#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
#define I915_NUM_ENGINES 8
@@ -72,4 +76,21 @@ struct drm_i915_private;
void i915_gem_park(struct drm_i915_private *i915);
void i915_gem_unpark(struct drm_i915_private *i915);
+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_inc_return(&t->count) == 1)
+ tasklet_unlock_wait(t);
+}
+
+static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
+{
+ if (atomic_dec_return(&t->count) == 0)
+ tasklet_kill(t);
+}
+
+static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
+{
+ return !atomic_read(&t->count);
+}
+
#endif /* __I915_GEM_H__ */
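These tasklet helpers let reset paths disable the execlists submission tasklet exactly once and synchronously, replacing the open-coded tasklet_kill()/tasklet_disable() sequence removed from i915_gem_reset_prepare_engine() earlier in this diff. A sketch of the intended pairing, assuming the callers are the new engine->reset.prepare()/finish() hooks:

    static void example_reset_prepare(struct intel_engine_cs *engine)
    {
            /* Raise the disable count; wait for a running instance to finish. */
            __tasklet_disable_sync_once(&engine->execlists.tasklet);
    }

    static void example_reset_finish(struct intel_engine_cs *engine)
    {
            /*
             * Drop the disable count; on the final drop, wait for any
             * instance scheduled in the meantime to complete.
             */
            __tasklet_enable_sync_once(&engine->execlists.tasklet);
            GEM_BUG_ON(!__tasklet_is_enabled(&engine->execlists.tasklet));
    }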
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 060335d3d9e0..b10770cfccd2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
- if (!ce->state)
- continue;
-
- WARN_ON(ce->pin_count);
- if (ce->ring)
- intel_ring_free(ce->ring);
-
- __i915_gem_object_release_unless_active(ce->state->obj);
+ if (ce->ops)
+ ce->ops->destroy(ce);
}
kfree(ctx->name);
@@ -203,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
if (ctx->ppgtt)
- i915_ppgtt_close(&ctx->ppgtt->base);
+ i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
@@ -214,10 +208,19 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
int ret;
unsigned int max;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
max = GEN11_MAX_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
+ } else {
+ /*
+ * When using GuC in proxy submission, GuC consumes the
+ * highest bit in the context id to indicate proxy submission.
+ */
+ if (USES_GUC_SUBMISSION(dev_priv))
+ max = MAX_GUC_CONTEXT_HW_ID;
+ else
+ max = MAX_CONTEXT_HW_ID;
+ }
+
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, max, GFP_KERNEL);
@@ -246,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -266,6 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
+ unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -283,6 +287,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->i915 = dev_priv;
ctx->sched.priority = I915_PRIORITY_NORMAL;
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+ struct intel_context *ce = &ctx->__engine[n];
+
+ ce->gem_context = ctx;
+ }
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
@@ -364,7 +374,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (USES_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
+ ppgtt = i915_ppgtt_create(dev_priv, file_priv);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -502,8 +512,8 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
- dev_priv->engine[RCS]->context_size ? "logical" :
- "fake");
+ DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+ "logical" : "fake");
return 0;
}
@@ -514,16 +524,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv, id) {
- engine->legacy_active_context = NULL;
- engine->legacy_active_ppgtt = NULL;
-
- if (!engine->last_retired_context)
- continue;
-
- intel_context_unpin(engine->last_retired_context, engine);
- engine->last_retired_context = NULL;
- }
+ for_each_engine(engine, dev_priv, id)
+ intel_engine_lost_context(engine);
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
@@ -583,68 +585,122 @@ last_request_on_engine(struct i915_timeline *timeline,
{
struct i915_request *rq;
- if (timeline == &engine->timeline)
- return NULL;
+ GEM_BUG_ON(timeline == &engine->timeline);
rq = i915_gem_active_raw(&timeline->last_request,
&engine->i915->drm.struct_mutex);
- if (rq && rq->engine == engine)
+ if (rq && rq->engine == engine) {
+ GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+ timeline->name, engine->name,
+ rq->fence.context, rq->fence.seqno);
+ GEM_BUG_ON(rq->timeline != timeline);
return rq;
+ }
return NULL;
}
-static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
{
- struct i915_timeline *timeline;
+ struct drm_i915_private *i915 = engine->i915;
+ const struct intel_context * const ce =
+ to_intel_context(i915->kernel_context, engine);
+ struct i915_timeline *barrier = ce->ring->timeline;
+ struct intel_ring *ring;
+ bool any_active = false;
- list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
- if (last_request_on_engine(timeline, engine))
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
+ struct i915_request *rq;
+
+ rq = last_request_on_engine(ring->timeline, engine);
+ if (!rq)
+ continue;
+
+ any_active = true;
+
+ if (rq->hw_context == ce)
+ continue;
+
+ /*
+ * Was this request submitted after the previous
+ * switch-to-kernel-context?
+ */
+ if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
+ GEM_TRACE("%s needs barrier for %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
return false;
+ }
+
+ GEM_TRACE("%s has barrier after %llx:%d\n",
+ ring->timeline->name,
+ rq->fence.context,
+ rq->fence.seqno);
}
- return intel_engine_has_kernel_context(engine);
+ /*
+ * If any other timeline was still active and behind the last barrier,
+ * then our last switch-to-kernel-context must still be queued and
+ * will run last (leaving the engine in the kernel context when it
+ * eventually idles).
+ */
+ if (any_active)
+ return true;
+
+ /* The engine is idle; check that it is idling in the kernel context. */
+ return engine->last_retired_context == ce;
}
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
- struct i915_timeline *timeline;
enum intel_engine_id id;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
- i915_retire_requests(dev_priv);
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915->kernel_context);
+
+ i915_retire_requests(i915);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, i915, id) {
+ struct intel_ring *ring;
struct i915_request *rq;
- if (engine_has_idle_kernel_context(engine))
+ GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
+ if (engine_has_kernel_context_barrier(engine))
continue;
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ GEM_TRACE("emit barrier on %s\n", engine->name);
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
/* Queue this switch after all other activity */
- list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+ list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
struct i915_request *prev;
- prev = last_request_on_engine(timeline, engine);
- if (prev)
- i915_sw_fence_await_sw_fence_gfp(&rq->submit,
- &prev->submit,
- I915_FENCE_GFP);
+ prev = last_request_on_engine(ring->timeline, engine);
+ if (!prev)
+ continue;
+
+ if (prev->gem_context == i915->kernel_context)
+ continue;
+
+ GEM_TRACE("add barrier on %s for %llx:%d\n",
+ engine->name,
+ prev->fence.context,
+ prev->fence.seqno);
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ &prev->submit,
+ I915_FENCE_GFP);
+ i915_timeline_sync_set(rq->timeline, &prev->fence);
}
- /*
- * Force a flush after the switch to ensure that all rendering
- * and operations prior to switching to the kernel context hits
- * memory. This should be guaranteed by the previous request,
- * but an extra layer of paranoia before we declare the system
- * idle (on suspend etc) is advisable!
- */
- __i915_request_add(rq, true);
+ i915_request_add(rq);
}
return 0;
@@ -664,7 +720,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct i915_gem_context *ctx;
int ret;
- if (!dev_priv->engine[RCS]->context_size)
+ if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
return -ENODEV;
if (args->pad != 0)
@@ -747,11 +803,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
- args->value = ctx->ppgtt->base.total;
+ args->value = ctx->ppgtt->vm.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+ args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else
- args->value = to_i915(dev)->ggtt.base.total;
+ args->value = to_i915(dev)->ggtt.vm.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
args->value = i915_gem_context_no_error_capture(ctx);
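The kernel-context barrier logic added to i915_gem_switch_to_kernel_context() above leans on the timeline sync map: when the switch request is ordered behind a user request, the pair is recorded with i915_timeline_sync_set(), and engine_has_kernel_context_barrier() later uses i915_timeline_sync_is_later() to decide whether a fresh barrier is required. A compact sketch of that handshake, with names taken from the diff and error handling elided:

    static bool example_needs_barrier(struct i915_timeline *barrier,
                                      const struct i915_request *rq)
    {
            /* Was rq submitted after our last switch-to-kernel-context? */
            return !i915_timeline_sync_is_later(barrier, &rq->fence);
    }

    static void example_record_barrier(struct i915_request *kernel_rq,
                                       struct i915_request *prev)
    {
            /* Order the kernel-context switch after prev... */
            i915_sw_fence_await_sw_fence_gfp(&kernel_rq->submit,
                                             &prev->submit,
                                             I915_FENCE_GFP);
            /* ...and remember that we are now synchronized past prev. */
            i915_timeline_sync_set(kernel_rq->timeline, &prev->fence);
    }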
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ace3b129c189..b116e4942c10 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -30,6 +30,7 @@
#include <linux/radix-tree.h>
#include "i915_gem.h"
+#include "i915_scheduler.h"
struct pid;
@@ -45,6 +46,13 @@ struct intel_ring;
#define DEFAULT_CONTEXT_HANDLE 0
+struct intel_context;
+
+struct intel_context_ops {
+ void (*unpin)(struct intel_context *ce);
+ void (*destroy)(struct intel_context *ce);
+};
+
/**
* struct i915_gem_context - client state
*
@@ -144,11 +152,14 @@ struct i915_gem_context {
/** engine: per-engine logical HW state */
struct intel_context {
+ struct i915_gem_context *gem_context;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
+
+ const struct intel_context_ops *ops;
} __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@@ -263,25 +274,26 @@ to_intel_context(struct i915_gem_context *ctx,
return &ctx->__engine[engine->id];
}
-static inline struct intel_ring *
+static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
return engine->context_pin(engine, ctx);
}
-static inline void __intel_context_pin(struct i915_gem_context *ctx,
- const struct intel_engine_cs *engine)
+static inline void __intel_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
GEM_BUG_ON(!ce->pin_count);
ce->pin_count++;
}
-static inline void intel_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static inline void intel_context_unpin(struct intel_context *ce)
{
- engine->context_unpin(engine, ctx);
+ GEM_BUG_ON(!ce->pin_count);
+ if (--ce->pin_count)
+ return;
+
+ GEM_BUG_ON(!ce->ops);
+ ce->ops->unpin(ce);
}
/* i915_gem_context.c */
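intel_context_unpin() is now a plain reference drop that defers the backend-specific release to ce->ops->unpin(), while __intel_context_pin() merely bumps the count on an already-pinned context. A sketch of the expected flow, assuming intel_context_pin() returns an ERR_PTR on failure as the engine backends do:

    static int example_use_context(struct i915_gem_context *ctx,
                                   struct intel_engine_cs *engine)
    {
            struct intel_context *ce;

            ce = intel_context_pin(ctx, engine); /* first pin sets up ce->ops */
            if (IS_ERR(ce))
                    return PTR_ERR(ce);

            __intel_context_pin(ce);             /* cheap nested reference */

            /* ... emit requests against ce->ring ... */

            intel_context_unpin(ce);             /* drop the nested ref */
            intel_context_unpin(ce);             /* final drop -> ce->ops->unpin(ce) */
            return 0;
    }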
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 69a7aec49e84..82e2ca17a441 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
i915_gem_object_unpin_map(obj);
}
-static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.map = i915_gem_dmabuf_kmap,
- .map_atomic = i915_gem_dmabuf_kmap_atomic,
.unmap = i915_gem_dmabuf_kunmap,
- .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 54814a196ee4..02b83a5ed96c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -69,7 +69,8 @@ static int ggtt_flush(struct drm_i915_private *i915)
err = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 22df17c8ca9b..3f0c612d42e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -66,6 +66,15 @@ enum {
#define __I915_EXEC_ILLEGAL_FLAGS \
(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
+/* Catch emission of unexpected errors for CI! */
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#undef EINVAL
+#define EINVAL ({ \
+ DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
+ 22; \
+})
+#endif
+
/**
* DOC: User command execution
*
@@ -534,7 +543,8 @@ eb_add_vma(struct i915_execbuffer *eb,
* paranoia do it everywhere.
*/
if (i == batch_idx) {
- if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+ if (entry->relocation_count &&
+ !(eb->flags[i] & EXEC_OBJECT_PINNED))
eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
@@ -723,7 +733,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->ctx = ctx;
- eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+ eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
eb->context_flags = 0;
if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -921,7 +931,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
i915_gem_object_unpin_map(cache->rq->batch->obj);
i915_gem_chipset_flush(cache->rq->i915);
- __i915_request_add(cache->rq, true);
+ i915_request_add(cache->rq);
cache->rq = NULL;
}
@@ -948,9 +958,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
if (cache->node.allocated) {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
- ggtt->base.clear_range(&ggtt->base,
- cache->node.start,
- cache->node.size);
+ ggtt->vm.clear_range(&ggtt->vm,
+ cache->node.start,
+ cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -1021,7 +1031,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
- (&ggtt->base.mm, &cache->node,
+ (&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -1042,9 +1052,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, page),
- offset, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
} else {
offset += page << PAGE_SHIFT;
}
@@ -1155,18 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_request;
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
- i915_vma_move_to_active(batch, rq, 0);
- reservation_object_lock(batch->resv, NULL);
- reservation_object_add_excl_fence(batch->resv, &rq->fence);
- reservation_object_unlock(batch->resv);
- i915_vma_unpin(batch);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto skip_request;
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- reservation_object_lock(vma->resv, NULL);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
- reservation_object_unlock(vma->resv);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
rq->batch = batch;
+ i915_vma_unpin(batch);
cache->rq = rq;
cache->rq_cmd = cmd;
@@ -1175,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
/* Return with batch mapping (cmd) still pinned */
return 0;
+skip_request:
+ i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1761,25 +1771,6 @@ slow:
return eb_relocate_slow(eb);
}
-static void eb_export_fence(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct reservation_object *resv = vma->resv;
-
- /*
- * Ignore errors from failing to allocate the new fence, we can't
- * handle an error right now. Worst case should be missed
- * synchronisation leading to rendering corruption.
- */
- reservation_object_lock(resv, NULL);
- if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &rq->fence);
- else if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &rq->fence);
- reservation_object_unlock(resv);
-}
-
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
@@ -1833,8 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
unsigned int flags = eb->flags[i];
struct i915_vma *vma = eb->vma[i];
- i915_vma_move_to_active(vma, eb->request, flags);
- eb_export_fence(vma, eb->request, flags);
+ err = i915_vma_move_to_active(vma, eb->request, flags);
+ if (unlikely(err)) {
+ i915_request_skip(eb->request, err);
+ return err;
+ }
__eb_unreserve_vma(vma, flags);
vma->exec_flags = NULL;
@@ -1874,45 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
return true;
}
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- const unsigned int idx = rq->engine->id;
-
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
- /*
- * Add a reference if we're newly entering the active list.
- * The order in which we add operations to the retirement queue is
- * vital here: mark_active adds to the start of the callback list,
- * such that subsequent callbacks are called first. Therefore we
- * add the active reference first and queue for it to be dropped
- * *last*.
- */
- if (!i915_vma_is_active(vma))
- obj->active_count++;
- i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], rq);
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
-
- obj->write_domain = 0;
- if (flags & EXEC_OBJECT_WRITE) {
- obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
- if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- i915_gem_active_set(&obj->frontbuffer_write, rq);
-
- obj->read_domains = 0;
- }
- obj->read_domains |= I915_GEM_GPU_DOMAINS;
-
- if (flags & EXEC_OBJECT_NEEDS_FENCE)
- i915_gem_active_set(&vma->last_fence, rq);
-}
-
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
u32 *cs;
@@ -2438,7 +2393,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- __i915_request_add(eb.request, err == 0);
+ i915_request_add(eb.request);
add_to_client(eb.request, file);
if (fences)
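With i915_vma_move_to_active() now able to fail (it returns int and folds in the fence export that eb_export_fence() used to do), callers follow one shape: on error, poison the request with i915_request_skip() and still add it, so the ring and timeline stay coherent. A reduced sketch of the idiom used in both __reloc_gpu_alloc() and eb_move_to_gpu() above, submission details elided:

    static int example_emit(struct i915_vma *vma, struct i915_request *rq)
    {
            int err;

            err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
            if (err)
                    i915_request_skip(rq, err); /* poison, keep fences sane */

            i915_request_add(rq); /* always add: the ring must stay in step */
            return err;
    }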
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 996ab2ad6c45..f00c7fbef79e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -42,7 +42,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
-#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
/**
* DOC: Global GTT views
@@ -195,18 +195,18 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
u32 unused)
{
u32 pte_flags;
- int ret;
+ int err;
if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
- vma->size);
- if (ret)
- return ret;
+ err = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start, vma->size);
+ if (err)
+ return err;
}
- /* Currently applicable only to VLV */
+ /* Applicable to VLV, and gen8+ */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
@@ -244,10 +244,13 @@ static void clear_pages(struct i915_vma *vma)
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ u32 flags)
{
- gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
- pte |= addr;
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
switch (level) {
case I915_CACHE_NONE:
@@ -375,37 +378,70 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
+static void stash_init(struct pagestash *stash)
+{
+ pagevec_init(&stash->pvec);
+ spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+ struct page *page = NULL;
+
+ spin_lock(&stash->lock);
+ if (likely(stash->pvec.nr))
+ page = stash->pvec.pages[--stash->pvec.nr];
+ spin_unlock(&stash->lock);
+
+ return page;
+}
+
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+ int nr;
+
+ spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+ nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
+ memcpy(stash->pvec.pages + stash->pvec.nr,
+ pvec->pages + pvec->nr - nr,
+ sizeof(pvec->pages[0]) * nr);
+ stash->pvec.nr += nr;
+
+ spin_unlock(&stash->lock);
+
+ pvec->nr -= nr;
+}
+
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct pagevec *pvec = &vm->free_pages;
- struct pagevec stash;
+ struct pagevec stack;
+ struct page *page;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- if (likely(pvec->nr))
- return pvec->pages[--pvec->nr];
+ page = stash_pop_page(&vm->free_pages);
+ if (page)
+ return page;
if (!vm->pt_kmap_wc)
return alloc_page(gfp);
- /* A placeholder for a specific mutex to guard the WC stash */
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
-
/* Look in our global stash of WC pages... */
- pvec = &vm->i915->mm.wc_stash;
- if (likely(pvec->nr))
- return pvec->pages[--pvec->nr];
+ page = stash_pop_page(&vm->i915->mm.wc_stash);
+ if (page)
+ return page;
/*
- * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+ * Otherwise batch allocate pages to amortize cost of set_pages_wc.
*
* We have to be careful as page allocation may trigger the shrinker
* (via direct reclaim) which will fill up the WC stash underneath us.
* So we add our WB pages into a temporary pvec on the stack and merge
* them into the WC stash after all the allocations are complete.
*/
- pagevec_init(&stash);
+ pagevec_init(&stack);
do {
struct page *page;
@@ -413,59 +449,67 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
page = alloc_page(gfp);
if (unlikely(!page))
break;
- stash.pages[stash.nr++] = page;
- } while (stash.nr < pagevec_space(pvec));
+ stack.pages[stack.nr++] = page;
+ } while (pagevec_space(&stack));
- if (stash.nr) {
- int nr = min_t(int, stash.nr, pagevec_space(pvec));
- struct page **pages = stash.pages + stash.nr - nr;
+ if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+ page = stack.pages[--stack.nr];
- if (nr && !set_pages_array_wc(pages, nr)) {
- memcpy(pvec->pages + pvec->nr,
- pages, sizeof(pages[0]) * nr);
- pvec->nr += nr;
- stash.nr -= nr;
- }
+ /* Merge spare WC pages to the global stash */
+ stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
- pagevec_release(&stash);
+ /* Push any surplus WC pages onto the local VM stash */
+ if (stack.nr)
+ stash_push_pagevec(&vm->free_pages, &stack);
}
- return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
+ /* Return unwanted leftovers */
+ if (unlikely(stack.nr)) {
+ WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+ __pagevec_release(&stack);
+ }
+
+ return page;
}
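The point of the batching above is that set_pages_array_wc() converts a whole array in one call, amortizing the TLB/cache flush that each attribute change implies on x86. Stripped of the stash bookkeeping, the pattern is roughly:

	struct page *pages[PAGEVEC_SIZE];
	unsigned int nr = 0;

	while (nr < ARRAY_SIZE(pages)) {
		struct page *page = alloc_page(gfp);	/* WB pages first */
		if (!page)
			break;
		pages[nr++] = page;
	}

	if (nr && !set_pages_array_wc(pages, nr))	/* one flush for all */
		page = pages[--nr];			/* spares go to a stash */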
static void vm_free_pages_release(struct i915_address_space *vm,
bool immediate)
{
- struct pagevec *pvec = &vm->free_pages;
+ struct pagevec *pvec = &vm->free_pages.pvec;
+ struct pagevec stack;
+ lockdep_assert_held(&vm->free_pages.lock);
GEM_BUG_ON(!pagevec_count(pvec));
if (vm->pt_kmap_wc) {
- struct pagevec *stash = &vm->i915->mm.wc_stash;
-
- /* When we use WC, first fill up the global stash and then
+ /*
+ * When we use WC, first fill up the global stash and then
* only if full, immediately free the overflow.
*/
+ stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
- if (pagevec_space(stash)) {
- do {
- stash->pages[stash->nr++] =
- pvec->pages[--pvec->nr];
- if (!pvec->nr)
- return;
- } while (pagevec_space(stash));
-
- /* As we have made some room in the VM's free_pages,
- * we can wait for it to fill again. Unless we are
- * inside i915_address_space_fini() and must
- * immediately release the pages!
- */
- if (!immediate)
- return;
- }
+ /*
+ * As we have made some room in the VM's free_pages,
+ * we can wait for it to fill again. Unless we are
+ * inside i915_address_space_fini() and must
+ * immediately release the pages!
+ */
+ if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+ return;
+
+ /*
+ * We have to drop the lock to allow ourselves to sleep,
+ * so take a copy of the pvec and clear the stash for
+ * others to use it as we sleep.
+ */
+ stack = *pvec;
+ pagevec_reinit(pvec);
+ spin_unlock(&vm->free_pages.lock);
+ pvec = &stack;
set_pages_array_wb(pvec->pages, pvec->nr);
+
+ spin_lock(&vm->free_pages.lock);
}
__pagevec_release(pvec);
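set_pages_array_wb() may sleep, so it cannot run under the stash spinlock; the function instead takes a struct copy of the shared pagevec, reinitialises the shared one, and does the sleeping work on the copy (re-taking the lock afterwards because the caller expects it held). The idiom in isolation:

	spin_lock(&stash->lock);
	stack = stash->pvec;		/* struct copy of the page pointers */
	pagevec_reinit(&stash->pvec);	/* stash stays usable while we sleep */
	spin_unlock(&stash->lock);

	set_pages_array_wb(stack.pages, stack.nr);	/* may sleep */
	__pagevec_release(&stack);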
@@ -481,20 +525,60 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
* unconditional might_sleep() for everybody.
*/
might_sleep();
- if (!pagevec_add(&vm->free_pages, page))
+ spin_lock(&vm->free_pages.lock);
+ if (!pagevec_add(&vm->free_pages.pvec, page))
vm_free_pages_release(vm, false);
+ spin_unlock(&vm->free_pages.lock);
+}
+
+static void i915_address_space_init(struct i915_address_space *vm,
+ struct drm_i915_private *dev_priv)
+{
+ /*
+ * The vm->mutex must be reclaim safe (for use in the shrinker).
+ * Do a dummy acquire now under fs_reclaim so that any allocation
+ * attempt holding the lock is immediately reported by lockdep.
+ */
+ mutex_init(&vm->mutex);
+ i915_gem_shrinker_taints_mutex(&vm->mutex);
+
+ GEM_BUG_ON(!vm->total);
+ drm_mm_init(&vm->mm, 0, vm->total);
+ vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+ stash_init(&vm->free_pages);
+
+ INIT_LIST_HEAD(&vm->active_list);
+ INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->unbound_list);
+}
+
+static void i915_address_space_fini(struct i915_address_space *vm)
+{
+ spin_lock(&vm->free_pages.lock);
+ if (pagevec_count(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, true);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+ spin_unlock(&vm->free_pages.lock);
+
+ drm_mm_takedown(&vm->mm);
+
+ mutex_destroy(&vm->mutex);
}
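i915_gem_shrinker_taints_mutex() is defined elsewhere in the driver; roughly, it performs a dummy lock/unlock inside a fake fs_reclaim context so that lockdep learns the shrinker may take this mutex, as in this sketch (assuming the fs_reclaim_acquire() API of this kernel era):

	static void taint_mutex_for_reclaim(struct mutex *mutex)
	{
		if (!IS_ENABLED(CONFIG_LOCKDEP))
			return;

		fs_reclaim_acquire(GFP_KERNEL);	/* pretend to be in reclaim */
		mutex_lock(mutex);		/* record reclaim -> mutex */
		mutex_unlock(mutex);
		fs_reclaim_release(GFP_KERNEL);
	}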
static int __setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
gfp_t gfp)
{
- p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
+ p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
if (unlikely(!p->page))
return -ENOMEM;
- p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ p->daddr = dma_map_page_attrs(vm->dma,
+ p->page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
vm_free_page(vm, p->page);
return -ENOMEM;
@@ -506,7 +590,7 @@ static int __setup_page_dma(struct i915_address_space *vm,
static int setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p)
{
- return __setup_page_dma(vm, p, I915_GFP_DMA);
+ return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}
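Two quiet-failure relaxations land together here: the allocation uses I915_GFP_ALLOW_FAIL so a failed page-table page comes back as -ENOMEM instead of invoking the OOM killer or warning, and the DMA mapping adds DMA_ATTR_SKIP_CPU_SYNC (the driver manages these pages' coherency itself) plus DMA_ATTR_NO_WARN. The effective allocation mask:

	/* What __setup_page_dma() now uses for a page-table page: */
	gfp_t gfp = __GFP_HIGHMEM | I915_GFP_ALLOW_FAIL;
	/*  == __GFP_HIGHMEM | GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN */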
static void cleanup_page_dma(struct i915_address_space *vm,
@@ -520,8 +604,8 @@ static void cleanup_page_dma(struct i915_address_space *vm,
#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
+#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
+#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
static void fill_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
@@ -575,8 +659,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
goto skip;
- addr = dma_map_page(vm->dma, page, 0, size,
- PCI_DMA_BIDIRECTIONAL);
+ addr = dma_map_page_attrs(vm->dma,
+ page, 0, size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_WARN);
if (unlikely(dma_mapping_error(vm->dma, addr)))
goto free_page;
@@ -614,7 +701,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
struct i915_page_table *pt;
- pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
+ pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
@@ -637,21 +724,20 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
fill_px(vm, pt,
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}
-static void gen6_initialize_pt(struct i915_address_space *vm,
+static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
struct i915_page_table *pt)
{
- fill32_px(vm, pt,
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+ fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
+ pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
@@ -685,7 +771,7 @@ static int __pdp_init(struct i915_address_space *vm,
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
- GFP_KERNEL | __GFP_NOWARN);
+ I915_GFP_ALLOW_FAIL);
if (unlikely(!pdp->page_directory))
return -ENOMEM;
@@ -765,53 +851,6 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
-/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct i915_request *rq,
- unsigned entry,
- dma_addr_t addr)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- BUG_ON(entry >= 4);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
- *cs++ = upper_32_bits(addr);
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
- *cs++ = lower_32_bits(addr);
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- int i, ret;
-
- for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- ret = gen8_write_pdp(rq, i, pd_daddr);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
-}
-
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
@@ -819,7 +858,7 @@ static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -833,7 +872,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -1005,14 +1044,15 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_pointer *pdp,
struct sgt_dma *iter,
struct gen8_insert_pte *idx,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct i915_page_directory *pd;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
gen8_pte_t *vaddr;
bool ret;
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
do {
@@ -1043,7 +1083,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
break;
}
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
pd = pdp->page_directory[idx->pdpe];
}
@@ -1059,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
- cache_level);
+ cache_level, flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
@@ -1074,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
struct i915_page_directory_pointer **pdps,
struct sgt_dma *iter,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
u64 start = vma->node.start;
dma_addr_t rem = iter->sg->length;
@@ -1192,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+ gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+ flags);
} else {
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
- &iter, &idx, cache_level))
+ &iter, &idx, cache_level,
+ flags))
GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -1229,7 +1272,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
@@ -1272,7 +1315,7 @@ free_scratch_page:
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct drm_i915_private *dev_priv = vm->i915;
enum vgt_g2v_type msg;
int i;
@@ -1333,13 +1376,13 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
+ if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
continue;
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
}
- cleanup_px(&ppgtt->base, &ppgtt->pml4);
+ cleanup_px(&ppgtt->vm, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1353,7 +1396,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (use_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
gen8_free_scratch(vm);
}
@@ -1489,7 +1532,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
gen8_pte_t scratch_pte,
struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory *pd;
u32 pdpe;
@@ -1499,7 +1542,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u64 pd_start = start;
u32 pde;
- if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
+ if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
continue;
seq_printf(m, "\tPDPE #%d\n", pdpe);
@@ -1507,7 +1550,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
u32 pte;
gen8_pte_t *pt_vaddr;
- if (pd->page_table[pde] == ppgtt->base.scratch_pt)
+ if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
continue;
pt_vaddr = kmap_atomic_px(pt);
@@ -1540,10 +1583,10 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
- u64 start = 0, length = ppgtt->base.total;
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
u64 pml4e;
@@ -1551,7 +1594,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
+ if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
continue;
seq_printf(m, " PML4E #%llu\n", pml4e);
@@ -1564,10 +1607,10 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
- struct i915_address_space *vm = &ppgtt->base;
+ struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
struct i915_page_directory *pd;
- u64 start = 0, length = ppgtt->base.total;
+ u64 start = 0, length = ppgtt->vm.total;
u64 from = start;
unsigned int pdpe;
@@ -1601,211 +1644,153 @@ unwind:
* space.
*
*/
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = vm->i915;
- int ret;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ppgtt->ref);
+
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
1ULL << 48 :
1ULL << 32;
+ /*
+ * From bdw, there is support for read-only pages in the PPGTT.
+ *
+ * XXX GVT is not honouring the lack of RW in the PTE bits.
+ */
+ ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+
+ i915_address_space_init(&ppgtt->vm, i915);
+
/* There are only a few exceptions for gen >=6: chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
- ppgtt->base.pt_kmap_wc = true;
+ if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
+ ppgtt->vm.pt_kmap_wc = true;
- ret = gen8_init_scratch(&ppgtt->base);
- if (ret) {
- ppgtt->base.total = 0;
- return ret;
- }
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_free;
- if (use_4lvl(vm)) {
- ret = setup_px(&ppgtt->base, &ppgtt->pml4);
- if (ret)
- goto free_scratch;
+ if (use_4lvl(&ppgtt->vm)) {
+ err = setup_px(&ppgtt->vm, &ppgtt->pml4);
+ if (err)
+ goto err_scratch;
- gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+ gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
- ppgtt->switch_mm = gen8_mm_switch_4lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
- if (ret)
- goto free_scratch;
+ err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
+ if (err)
+ goto err_scratch;
- if (intel_vgpu_active(dev_priv)) {
- ret = gen8_preallocate_top_level_pdp(ppgtt);
- if (ret) {
+ if (intel_vgpu_active(i915)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err) {
__pdp_fini(&ppgtt->pdp);
- goto free_scratch;
+ goto err_scratch;
}
}
- ppgtt->switch_mm = gen8_mm_switch_3lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
}
- if (intel_vgpu_active(dev_priv))
+ if (intel_vgpu_active(i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
ppgtt->debug_dump = gen8_dump_ppgtt;
- return 0;
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
-free_scratch:
- gen8_free_scratch(&ppgtt->base);
- return ret;
+ return ppgtt;
+
+err_scratch:
+ gen8_free_scratch(&ppgtt->vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
}
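With the init/create split, callers receive either a live ppgtt or an ERR_PTR, and the error ladder unwinds in reverse order of construction. Expected usage (i915_ppgtt_put() is the existing kref helper, not part of this hunk):

	struct i915_hw_ppgtt *ppgtt;

	ppgtt = gen8_ppgtt_create(i915);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	/* ... use ppgtt->vm ... */

	i915_ppgtt_put(ppgtt);	/* final kref -> i915_ppgtt_release() */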
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct i915_page_table *unused;
- gen6_pte_t scratch_pte;
- u32 pd_entry, pte, pde;
- u32 start = 0, length = ppgtt->base.total;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ struct i915_page_table *pt;
+ u32 pte, pde;
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
+ gen6_for_all_pdes(pt, &base->pd, pde) {
+ gen6_pte_t *vaddr;
+
+ if (pt == base->vm.scratch_pt)
+ continue;
+
+ if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+ u32 expected =
+ GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
+ GEN6_PDE_VALID;
+ u32 pd_entry = readl(ppgtt->pd_addr + pde);
+
+ if (pd_entry != expected)
+ seq_printf(m,
+ "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+ pde,
+ pd_entry,
+ expected);
+
+ seq_printf(m, "\tPDE: %x\n", pd_entry);
+ }
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
- u32 expected;
- gen6_pte_t *pt_vaddr;
- const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
- pd_entry = readl(ppgtt->pd_addr + pde);
- expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
-
- if (pd_entry != expected)
- seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
- pde,
- pd_entry,
- expected);
- seq_printf(m, "\tPDE: %x\n", pd_entry);
-
- pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
-
- for (pte = 0; pte < GEN6_PTES; pte+=4) {
- unsigned long va =
- (pde * PAGE_SIZE * GEN6_PTES) +
- (pte * PAGE_SIZE);
+ vaddr = kmap_atomic_px(base->pd.page_table[pde]);
+ for (pte = 0; pte < GEN6_PTES; pte += 4) {
int i;
- bool found = false;
+
for (i = 0; i < 4; i++)
- if (pt_vaddr[pte + i] != scratch_pte)
- found = true;
- if (!found)
+ if (vaddr[pte + i] != scratch_pte)
+ break;
+ if (i == 4)
continue;
- seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+ seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+ pde, pte,
+ (pde * GEN6_PTES + pte) * PAGE_SIZE);
for (i = 0; i < 4; i++) {
- if (pt_vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %08x", pt_vaddr[pte + i]);
+ if (vaddr[pte + i] != scratch_pte)
+ seq_printf(m, " %08x", vaddr[pte + i]);
else
- seq_puts(m, " SCRATCH ");
+ seq_puts(m, " SCRATCH");
}
seq_puts(m, "\n");
}
- kunmap_atomic(pt_vaddr);
+ kunmap_atomic(vaddr);
}
}
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
+static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
const unsigned int pde,
const struct i915_page_table *pt)
{
/* Caller needs to make sure the write completes if necessary */
- writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
- ppgtt->pd_addr + pde);
-}
-
-/* Write all the page tables found in the ppgtt structure to incrementing page
- * directories. */
-static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
- u32 start, u32 length)
-{
- struct i915_page_table *pt;
- unsigned int pde;
-
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
- gen6_write_pde(ppgtt, pde, pt);
-
- mark_tlbs_dirty(ppgtt);
- wmb();
-}
-
-static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
-{
- GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
- return ppgtt->pd.base.ggtt_offset << 10;
-}
-
-static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 *cs;
-
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- struct drm_i915_private *dev_priv = rq->i915;
-
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- return 0;
+ iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+ ppgtt->pd_addr + pde);
}
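With hsw_mm_switch(), gen7_mm_switch() and gen6_mm_switch() deleted, the page directory sits at a fixed GGTT offset and every PDE update in this file now follows one pattern, as in pd_vma_bind() and gen6_alloc_va_range() below:

	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
		  ppgtt->pd_addr + pde);
	mark_tlbs_dirty(&ppgtt->base);		/* reload PDs on ctx switch */
	gen6_ggtt_invalidate(ppgtt->base.vm.i915);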
static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
@@ -1867,22 +1852,30 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
unsigned int first_entry = start >> PAGE_SHIFT;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length >> PAGE_SHIFT;
- gen6_pte_t scratch_pte =
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
while (num_entries) {
- struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
- unsigned int end = min(pte + num_entries, GEN6_PTES);
+ struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
+ const unsigned int end = min(pte + num_entries, GEN6_PTES);
+ const unsigned int count = end - pte;
gen6_pte_t *vaddr;
- num_entries -= end - pte;
+ GEM_BUG_ON(pt == vm->scratch_pt);
+
+ num_entries -= count;
+
+ GEM_BUG_ON(count > pt->used_ptes);
+ pt->used_ptes -= count;
+ if (!pt->used_ptes)
+ ppgtt->scan_for_unused_pt = true;
- /* Note that the hw doesn't support removing PDE on the fly
+ /*
+ * Note that the hw doesn't support removing PDE on the fly
* (they are cached inside the context with no means to
* invalidate the cache), so we can only reset the PTE
* entries back to scratch.
@@ -1911,6 +1904,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
+ GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
+
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -1939,218 +1934,280 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
static int gen6_alloc_va_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_table *pt;
u64 from = start;
unsigned int pde;
bool flush = false;
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
+ gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
+ const unsigned int count = gen6_pte_count(start, length);
+
if (pt == vm->scratch_pt) {
pt = alloc_pt(vm);
if (IS_ERR(pt))
goto unwind_out;
- gen6_initialize_pt(vm, pt);
- ppgtt->pd.page_table[pde] = pt;
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
+ gen6_initialize_pt(ppgtt, pt);
+ ppgtt->base.pd.page_table[pde] = pt;
+
+ if (i915_vma_is_bound(ppgtt->vma,
+ I915_VMA_GLOBAL_BIND)) {
+ gen6_write_pde(ppgtt, pde, pt);
+ flush = true;
+ }
+
+ GEM_BUG_ON(pt->used_ptes);
}
+
+ pt->used_ptes += count;
}
if (flush) {
- mark_tlbs_dirty(ppgtt);
- wmb();
+ mark_tlbs_dirty(&ppgtt->base);
+ gen6_ggtt_invalidate(ppgtt->base.vm.i915);
}
return 0;
unwind_out:
- gen6_ppgtt_clear_range(vm, from, start);
+ gen6_ppgtt_clear_range(vm, from, start - from);
return -ENOMEM;
}
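The pt->used_ptes accounting introduced in this hunk and in gen6_ppgtt_clear_range() above is what lets pd_vma_unbind() (below) reap empty page tables. In outline:

	/* gen6_alloc_va_range(): */
	pt->used_ptes += gen6_pte_count(start, length);

	/* gen6_ppgtt_clear_range(): */
	pt->used_ptes -= count;
	if (!pt->used_ptes)
		ppgtt->scan_for_unused_pt = true;	/* reap on unbind */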
-static int gen6_init_scratch(struct i915_address_space *vm)
+static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
{
+ struct i915_address_space * const vm = &ppgtt->base.vm;
+ struct i915_page_table *unused;
+ u32 pde;
int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
+ ppgtt->scratch_pte =
+ vm->pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_NONE, PTE_READ_ONLY);
+
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
cleanup_scratch_page(vm);
return PTR_ERR(vm->scratch_pt);
}
- gen6_initialize_pt(vm, vm->scratch_pt);
+ gen6_initialize_pt(ppgtt, vm->scratch_pt);
+ gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
+ ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
return 0;
}
-static void gen6_free_scratch(struct i915_address_space *vm)
+static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
{
free_pt(vm, vm->scratch_pt);
cleanup_scratch_page(vm);
}
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory *pd = &ppgtt->pd;
struct i915_page_table *pt;
u32 pde;
- drm_mm_remove_node(&ppgtt->node);
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ if (pt != ppgtt->base.vm.scratch_pt)
+ free_pt(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- gen6_for_all_pdes(pt, pd, pde)
- if (pt != vm->scratch_pt)
- free_pt(vm, pt);
+ i915_vma_destroy(ppgtt->vma);
- gen6_free_scratch(vm);
+ gen6_ppgtt_free_pd(ppgtt);
+ gen6_ppgtt_free_scratch(vm);
}
-static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+static int pd_vma_set_pages(struct i915_vma *vma)
{
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
+ vma->pages = ERR_PTR(-ENODEV);
+ return 0;
+}
- /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
- * allocator works in address space sizes, so it's multiplied by page
- * size. We allocate at the top of the GTT to avoid fragmentation.
- */
- BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
- ret = gen6_init_scratch(vm);
- if (ret)
- return ret;
+ vma->pages = NULL;
+}
- ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
- GEN6_PD_SIZE, GEN6_PD_ALIGN,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->base.total,
- PIN_HIGH);
- if (ret)
- goto err_out;
+static int pd_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct gen6_hw_ppgtt *ppgtt = vma->private;
+ u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
+ struct i915_page_table *pt;
+ unsigned int pde;
- if (ppgtt->node.start < ggtt->mappable_end)
- DRM_DEBUG("Forced to use aperture for PDEs\n");
+ ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
- ppgtt->pd.base.ggtt_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ gen6_write_pde(ppgtt, pde, pt);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
- ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
+ mark_tlbs_dirty(&ppgtt->base);
+ gen6_ggtt_invalidate(ppgtt->base.vm.i915);
return 0;
-
-err_out:
- gen6_free_scratch(vm);
- return ret;
}
-static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+static void pd_vma_unbind(struct i915_vma *vma)
{
- return gen6_ppgtt_allocate_page_directories(ppgtt);
-}
+ struct gen6_hw_ppgtt *ppgtt = vma->private;
+ struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
+ struct i915_page_table *pt;
+ unsigned int pde;
-static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
- u64 start, u64 length)
-{
- struct i915_page_table *unused;
- u32 pde;
+ if (!ppgtt->scan_for_unused_pt)
+ return;
+
+ /* Free all no longer used page tables */
+ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
+ if (pt->used_ptes || pt == scratch_pt)
+ continue;
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
- ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
+ free_pt(&ppgtt->base.vm, pt);
+ ppgtt->base.pd.page_table[pde] = scratch_pt;
+ }
+
+ ppgtt->scan_for_unused_pt = false;
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
-{
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
+static const struct i915_vma_ops pd_vma_ops = {
+ .set_pages = pd_vma_set_pages,
+ .clear_pages = pd_vma_clear_pages,
+ .bind_vma = pd_vma_bind,
+ .unbind_vma = pd_vma_unbind,
+};
- ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
- ppgtt->switch_mm = gen6_mm_switch;
- else if (IS_HASWELL(dev_priv))
- ppgtt->switch_mm = hsw_mm_switch;
- else if (IS_GEN7(dev_priv))
- ppgtt->switch_mm = gen7_mm_switch;
- else
- BUG();
+static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
+{
+ struct drm_i915_private *i915 = ppgtt->base.vm.i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_vma *vma;
- ret = gen6_ppgtt_alloc(ppgtt);
- if (ret)
- return ret;
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(size > ggtt->vm.total);
- ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+ vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
- gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+ init_request_active(&vma->last_fence, NULL);
- ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
- if (ret) {
- gen6_ppgtt_cleanup(&ppgtt->base);
- return ret;
- }
+ vma->vm = &ggtt->vm;
+ vma->ops = &pd_vma_ops;
+ vma->private = ppgtt;
- ppgtt->base.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.set_pages = ppgtt_set_pages;
- ppgtt->base.clear_pages = clear_pages;
- ppgtt->base.cleanup = gen6_ppgtt_cleanup;
- ppgtt->debug_dump = gen6_dump_ppgtt;
+ vma->active = RB_ROOT;
- DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
- ppgtt->node.size >> 20,
- ppgtt->node.start / PAGE_SIZE);
+ vma->size = size;
+ vma->fence_size = size;
+ vma->flags = I915_VMA_GGTT;
+ vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
- DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
- ppgtt->pd.base.ggtt_offset << 10);
+ INIT_LIST_HEAD(&vma->obj_link);
+ list_add(&vma->vm_link, &vma->vm->unbound_list);
- return 0;
+ return vma;
}
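pd_vma_create() fakes just enough of a vma that the regular GGTT machinery can place the page directory: there is no backing object, vma->pages is left poisoned by pd_vma_set_pages(), the ROTATED view type exists only to defeat fencing, and the private ops write or reap PDEs on bind/unbind. That reduces the old allocate-PD-space dance to a plain pin, as gen6_ppgtt_pin() below shows:

	/* Pin the PD like any other vma; pd_vma_bind() rewrites every PDE. */
	err = i915_vma_pin(ppgtt->vma, 0, GEN6_PD_ALIGN,
			   PIN_GLOBAL | PIN_HIGH);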
-static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_private *dev_priv)
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
- ppgtt->base.i915 = dev_priv;
- ppgtt->base.dma = &dev_priv->drm.pdev->dev;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- if (INTEL_GEN(dev_priv) < 8)
- return gen6_ppgtt_init(ppgtt);
- else
- return gen8_ppgtt_init(ppgtt);
+ /*
+ * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
+ * which will be pinned into every active context.
+ * (When vma->pin_count becomes atomic, I expect we will naturally
+ * need a larger, unpacked, type and kill this redundancy.)
+ */
+ if (ppgtt->pin_count++)
+ return 0;
+
+ /*
+ * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ return i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
}
-static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv,
- const char *name)
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
{
- drm_mm_init(&vm->mm, 0, vm->total);
- vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- INIT_LIST_HEAD(&vm->active_list);
- INIT_LIST_HEAD(&vm->inactive_list);
- INIT_LIST_HEAD(&vm->unbound_list);
+ GEM_BUG_ON(!ppgtt->pin_count);
+ if (--ppgtt->pin_count)
+ return;
- list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages);
+ i915_vma_unpin(ppgtt->vma);
}
-static void i915_address_space_fini(struct i915_address_space *vm)
+static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
- if (pagevec_count(&vm->free_pages))
- vm_free_pages_release(vm, true);
+ struct i915_ggtt * const ggtt = &i915->ggtt;
+ struct gen6_hw_ppgtt *ppgtt;
+ int err;
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ppgtt->base.ref);
+
+ ppgtt->base.vm.i915 = i915;
+ ppgtt->base.vm.dma = &i915->drm.pdev->dev;
+
+ ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+
+ i915_address_space_init(&ppgtt->base.vm, i915);
+
+ ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.debug_dump = gen6_dump_ppgtt;
+
+ ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
+
+ ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+ err = gen6_ppgtt_init_scratch(ppgtt);
+ if (err)
+ goto err_free;
+
+ ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ goto err_scratch;
+ }
+
+ return &ppgtt->base;
+
+err_scratch:
+ gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+err_free:
+ kfree(ppgtt);
+ return ERR_PTR(err);
}
static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
@@ -2212,29 +2269,28 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
}
+static struct i915_hw_ppgtt *
+__hw_ppgtt_create(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) < 8)
+ return gen6_ppgtt_create(i915);
+ else
+ return gen8_ppgtt_create(i915);
+}
+
struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv,
- const char *name)
+i915_ppgtt_create(struct drm_i915_private *i915,
+ struct drm_i915_file_private *fpriv)
{
struct i915_hw_ppgtt *ppgtt;
- int ret;
-
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
- ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret) {
- kfree(ppgtt);
- return ERR_PTR(ret);
- }
+ ppgtt = __hw_ppgtt_create(i915);
+ if (IS_ERR(ppgtt))
+ return ppgtt;
- kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv, name);
- ppgtt->base.file = fpriv;
+ ppgtt->vm.file = fpriv;
- trace_i915_ppgtt_create(&ppgtt->base);
+ trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
@@ -2268,16 +2324,16 @@ void i915_ppgtt_release(struct kref *kref)
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
- trace_i915_ppgtt_release(&ppgtt->base);
+ trace_i915_ppgtt_release(&ppgtt->vm);
- ppgtt_destroy_vma(&ppgtt->base);
+ ppgtt_destroy_vma(&ppgtt->vm);
- GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
- GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
- ppgtt->base.cleanup(&ppgtt->base);
- i915_address_space_fini(&ppgtt->base);
+ ppgtt->vm.cleanup(&ppgtt->vm);
+ i915_address_space_fini(&ppgtt->vm);
kfree(ppgtt);
}
@@ -2373,7 +2429,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
i915_ggtt_invalidate(dev_priv);
}
@@ -2419,7 +2475,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
- gen8_set_pte(pte, gen8_pte_encode(addr, level));
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
ggtt->invalidate(vm->i915);
}
@@ -2427,14 +2483,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
dma_addr_t addr;
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += vma->node.start >> PAGE_SHIFT;
for_each_sgt_dma(addr, sgt_iter, vma->pages)
@@ -2500,7 +2561,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2561,13 +2622,14 @@ struct insert_entries {
struct i915_address_space *vm;
struct i915_vma *vma;
enum i915_cache_level level;
+ u32 flags;
};
static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
+ gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
@@ -2576,9 +2638,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
- struct insert_entries arg = { vm, vma, level };
+ struct insert_entries arg = { vm, vma, level, flags };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
@@ -2669,9 +2731,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
- /* Currently applicable only to VLV */
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
- if (obj->gt_ro)
+ if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
@@ -2709,23 +2771,22 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
/* Currently applicable only to VLV */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
- if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
- appgtt->base.allocate_va_range) {
- ret = appgtt->base.allocate_va_range(&appgtt->base,
- vma->node.start,
- vma->size);
+ if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ ret = appgtt->vm.allocate_va_range(&appgtt->vm,
+ vma->node.start,
+ vma->size);
if (ret)
return ret;
}
- appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
- pte_flags);
+ appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
+ pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2748,7 +2809,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
- struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
+ struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
vm->clear_range(vm, vma->node.start, vma->size);
}
@@ -2762,7 +2823,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, 0)) {
+ if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -2811,34 +2872,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
struct i915_hw_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
+ ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
- if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
+ if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
err = -ENODEV;
goto err_ppgtt;
}
- if (ppgtt->base.allocate_va_range) {
- /* Note we only pre-allocate as far as the end of the global
- * GTT. On 48b / 4-level page-tables, the difference is very,
- * very significant! We have to preallocate as GVT/vgpu does
- * not like the page directory disappearing.
- */
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- 0, ggtt->base.total);
- if (err)
- goto err_ppgtt;
- }
+ /*
+ * Note we only pre-allocate as far as the end of the global
+ * GTT. On 48b / 4-level page-tables, the difference is very,
+ * very significant! We have to preallocate as GVT/vgpu does
+ * not like the page directory disappearing.
+ */
+ err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+ if (err)
+ goto err_ppgtt;
i915->mm.aliasing_ppgtt = ppgtt;
- GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
- ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+ ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
- GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
- ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+ ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
return 0;
@@ -2858,8 +2917,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
i915_ppgtt_put(ppgtt);
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2883,7 +2942,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
@@ -2891,16 +2950,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start);
+ ggtt->vm.clear_range(&ggtt->vm, hole_start,
+ hole_end - hole_start);
}
/* And finally clear the reserved guard page */
- ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
@@ -2925,30 +2983,26 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
struct i915_vma *vma, *vn;
struct pagevec *pvec;
- ggtt->base.closed = true;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
- list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
- WARN_ON(i915_vma_unbind(vma));
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- i915_gem_cleanup_stolen(&dev_priv->drm);
+ ggtt->vm.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_fini_aliasing_ppgtt(dev_priv);
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
+ WARN_ON(i915_vma_unbind(vma));
+
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
- if (drm_mm_initialized(&ggtt->base.mm)) {
+ if (drm_mm_initialized(&ggtt->vm.mm)) {
intel_vgt_deballoon(dev_priv);
- i915_address_space_fini(&ggtt->base);
+ i915_address_space_fini(&ggtt->vm);
}
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
- pvec = &dev_priv->mm.wc_stash;
+ pvec = &dev_priv->mm.wc_stash.pvec;
if (pvec->nr) {
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
@@ -2958,6 +3012,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
arch_phys_wc_del(ggtt->mtrr);
io_mapping_fini(&ggtt->iomap);
+
+ i915_gem_cleanup_stolen(&dev_priv->drm);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2996,7 +3052,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
phys_addr_t phys_addr;
int ret;
@@ -3020,7 +3076,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
+ ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -3326,7 +3382,7 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3350,29 +3406,30 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- ggtt->base.cleanup = gen6_gmch_remove;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.insert_page = gen8_ggtt_insert_page;
- ggtt->base.clear_range = nop_clear_range;
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->base.clear_range = gen8_ggtt_clear_range;
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
- ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->base.clear_range != nop_clear_range)
- ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->vm.clear_range != nop_clear_range)
+ ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
}
ggtt->invalidate = gen6_ggtt_invalidate;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
setup_private_pat(dev_priv);
return ggtt_probe_common(ggtt, size);
@@ -3380,7 +3437,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3407,29 +3464,30 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
- ggtt->base.clear_range = gen6_ggtt_clear_range;
- ggtt->base.insert_page = gen6_ggtt_insert_page;
- ggtt->base.insert_entries = gen6_ggtt_insert_entries;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = gen6_gmch_remove;
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
if (HAS_EDRAM(dev_priv))
- ggtt->base.pte_encode = iris_pte_encode;
+ ggtt->vm.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev_priv))
- ggtt->base.pte_encode = hsw_pte_encode;
+ ggtt->vm.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev_priv))
- ggtt->base.pte_encode = byt_pte_encode;
+ ggtt->vm.pte_encode = byt_pte_encode;
else if (INTEL_GEN(dev_priv) >= 7)
- ggtt->base.pte_encode = ivb_pte_encode;
+ ggtt->vm.pte_encode = ivb_pte_encode;
else
- ggtt->base.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
return ggtt_probe_common(ggtt, size);
}
@@ -3441,7 +3499,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
phys_addr_t gmadr_base;
int ret;
@@ -3451,26 +3509,25 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
return -EIO;
}
- intel_gtt_get(&ggtt->base.total,
- &gmadr_base,
- &ggtt->mappable_end);
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
ggtt->gmadr =
(struct resource) DEFINE_RES_MEM(gmadr_base,
ggtt->mappable_end);
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
- ggtt->base.insert_page = i915_ggtt_insert_page;
- ggtt->base.insert_entries = i915_ggtt_insert_entries;
- ggtt->base.clear_range = i915_ggtt_clear_range;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.set_pages = ggtt_set_pages;
- ggtt->base.clear_pages = clear_pages;
- ggtt->base.cleanup = i915_gmch_remove;
+ ggtt->vm.insert_page = i915_ggtt_insert_page;
+ ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+ ggtt->vm.clear_range = i915_ggtt_clear_range;
+ ggtt->vm.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
+ ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+ ggtt->vm.vma_ops.clear_pages = clear_pages;
+
if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3486,8 +3543,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ggtt->base.i915 = dev_priv;
- ggtt->base.dma = &dev_priv->drm.pdev->dev;
+ ggtt->vm.i915 = dev_priv;
+ ggtt->vm.dma = &dev_priv->drm.pdev->dev;
if (INTEL_GEN(dev_priv) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -3504,27 +3561,29 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* restriction!
*/
if (USES_GUC(dev_priv)) {
- ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if ((ggtt->base.total - 1) >> 32) {
+ if ((ggtt->vm.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
- ggtt->base.total >> 20);
- ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+ ggtt->vm.total >> 20);
+ ggtt->vm.total = 1ULL << 32;
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
- if (ggtt->mappable_end > ggtt->base.total) {
+ if (ggtt->mappable_end > ggtt->vm.total) {
DRM_ERROR("mappable aperture extends past end of GGTT,"
" aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->base.total);
- ggtt->mappable_end = ggtt->base.total;
+ &ggtt->mappable_end, ggtt->vm.total);
+ ggtt->mappable_end = ggtt->vm.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("DSM size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
@@ -3543,7 +3602,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- INIT_LIST_HEAD(&dev_priv->vm_list);
+ stash_init(&dev_priv->mm.wc_stash);
/* Note that we use page colouring to enforce a guard page at the
* end of the address space. This is required as the CS may prefetch
* beyond the end of the batch buffer, across the page boundary,
* and beyond the end of the GTT if we do not provide a guard.
* and beyond the end of the GTT if we do not provide a guard.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_address_space_init(&ggtt->base, dev_priv, "[global]");
+ i915_address_space_init(&ggtt->vm, dev_priv);
+
+ /* Only VLV supports read-only GGTT mappings */
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+
if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
- ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+ ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
@@ -3576,7 +3639,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
out_gtt_cleanup:
- ggtt->base.cleanup(&ggtt->base);
+ ggtt->vm.cleanup(&ggtt->vm);
return ret;
}
@@ -3610,34 +3673,35 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *obj, *on;
+ struct i915_vma *vma, *vn;
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+ ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
- bool ggtt_bound = false;
- struct i915_vma *vma;
+ GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
- for_each_ggtt_vma(vma, obj) {
- if (!i915_vma_unbind(vma))
- continue;
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+ continue;
- WARN_ON(i915_vma_bind(vma, obj->cache_level,
- PIN_UPDATE));
- ggtt_bound = true;
- }
+ if (!i915_vma_unbind(vma))
+ continue;
- if (ggtt_bound)
+ WARN_ON(i915_vma_bind(vma,
+ obj ? obj->cache_level : 0,
+ PIN_UPDATE));
+ if (obj)
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
- ggtt->base.closed = false;
+ ggtt->vm.closed = false;
+ i915_ggtt_invalidate(dev_priv);
if (INTEL_GEN(dev_priv) >= 8) {
struct intel_ppat *ppat = &dev_priv->ppat;
@@ -3646,23 +3710,6 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
dev_priv->ppat.update_hw(dev_priv);
return;
}
-
- if (USES_PPGTT(dev_priv)) {
- struct i915_address_space *vm;
-
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- struct i915_hw_ppgtt *ppgtt;
-
- if (i915_is_ggtt(vm))
- ppgtt = dev_priv->mm.aliasing_ppgtt;
- else
- ppgtt = i915_vm_to_ppgtt(vm);
-
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
- }
- }
-
- i915_ggtt_invalidate(dev_priv);
}
static struct scatterlist *
@@ -3880,7 +3927,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -3977,7 +4024,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
@@ -3988,7 +4035,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
mode = DRM_MM_INSERT_BEST;
if (flags & PIN_HIGH)
- mode = DRM_MM_INSERT_HIGH;
+ mode = DRM_MM_INSERT_HIGHEST;
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
@@ -4008,6 +4055,15 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
+ if (mode & DRM_MM_INSERT_ONCE) {
+ err = drm_mm_insert_node_in_range(&vm->mm, node,
+ size, alignment, color,
+ start, end,
+ DRM_MM_INSERT_BEST);
+ if (err != -ENOSPC)
+ return err;
+ }
+
if (flags & PIN_NOEVICT)
return -ENOSPC;
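Note the interplay with the PIN_HIGH change above: DRM_MM_INSERT_HIGHEST implies DRM_MM_INSERT_ONCE in drm_mm.h, i.e. only the single topmost hole is probed, so the new hunk retries the allocation as an ordinary best-fit search before declaring the address space full. A condensed sketch of that policy (helper name illustrative):

        /* Sketch: probe the highest hole once, then fall back to best fit. */
        static int sketch_insert(struct drm_mm *mm, struct drm_mm_node *node,
                                 u64 size, u64 alignment, unsigned long color,
                                 u64 start, u64 end, enum drm_mm_insert_mode mode)
        {
                int err;

                err = drm_mm_insert_node_in_range(mm, node, size, alignment,
                                                  color, start, end, mode);
                if (err != -ENOSPC || !(mode & DRM_MM_INSERT_ONCE))
                        return err;

                return drm_mm_insert_node_in_range(mm, node, size, alignment,
                                                   color, start, end,
                                                   DRM_MM_INSERT_BEST);
        }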
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aec4f73574f4..2a116a91420b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -58,6 +58,7 @@
struct drm_i915_file_private;
struct drm_i915_fence_reg;
+struct i915_vma;
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
@@ -65,7 +66,7 @@ typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -254,6 +255,26 @@ struct i915_pml4 {
struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};
+struct i915_vma_ops {
+ /* Map an object into an address space with the given cache flags. */
+ int (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ /*
+ * Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page.
+ */
+ void (*unbind_vma)(struct i915_vma *vma);
+
+ int (*set_pages)(struct i915_vma *vma);
+ void (*clear_pages)(struct i915_vma *vma);
+};
+
+struct pagestash {
+ spinlock_t lock;
+ struct pagevec pvec;
+};
+
struct i915_address_space {
struct drm_mm mm;
struct drm_i915_private *i915;
@@ -267,12 +288,13 @@ struct i915_address_space {
* assign blame.
*/
struct drm_i915_file_private *file;
- struct list_head global_link;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
bool closed;
+ struct mutex mutex; /* protects vma and our lists */
+
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -308,8 +330,13 @@ struct i915_address_space {
*/
struct list_head unbound_list;
- struct pagevec free_pages;
- bool pt_kmap_wc;
+ struct pagestash free_pages;
+
+ /* Some systems require uncached updates of the page directories */
+ bool pt_kmap_wc:1;
+
+ /* Some systems support read-only mappings for GGTT and/or PPGTT */
+ bool has_read_only:1;
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
@@ -331,15 +358,8 @@ struct i915_address_space {
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
- /** Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page. */
- void (*unbind_vma)(struct i915_vma *vma);
- /* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
+
+ struct i915_vma_ops vma_ops;
I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
I915_SELFTEST_DECLARE(bool scrub_64K);
@@ -367,7 +387,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
* the spec.
*/
struct i915_ggtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct io_mapping iomap; /* Mapping to our CPU mappable region */
struct resource gmadr; /* GMADR resource */
@@ -385,9 +405,9 @@ struct i915_ggtt {
};
struct i915_hw_ppgtt {
- struct i915_address_space base;
+ struct i915_address_space vm;
struct kref ref;
- struct drm_mm_node node;
+
unsigned long pd_dirty_rings;
union {
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
@@ -395,13 +415,28 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd; /* GEN6-7 */
};
+ void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+struct gen6_hw_ppgtt {
+ struct i915_hw_ppgtt base;
+
+ struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
+ gen6_pte_t scratch_pte;
- int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct i915_request *rq);
- void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+ unsigned int pin_count;
+ bool scan_for_unused_pt;
};
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
+
+static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
+{
+ BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
+ return __to_gen6_ppgtt(base);
+}
+
/*
* gen6_for_each_pde() iterates over every pde from start until start+length.
* If start and start+length are not perfectly divisible, the macro will round
@@ -440,8 +475,8 @@ static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
const u64 mask = ~((1ULL << pde_shift) - 1);
u64 end;
- WARN_ON(length == 0);
- WARN_ON(offset_in_page(addr|length));
+ GEM_BUG_ON(length == 0);
+ GEM_BUG_ON(offset_in_page(addr | length));
end = addr + length;
@@ -543,7 +578,7 @@ static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
+ return container_of(vm, struct i915_ggtt, vm);
}
#define INTEL_MAX_PPAT_ENTRIES 8
@@ -591,8 +626,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv,
- const char *name);
+ struct drm_i915_file_private *fpriv);
void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
@@ -605,6 +639,9 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
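The new gen6_hw_ppgtt wrapper relies on its i915_hw_ppgtt base being the first member, so to_gen6_ppgtt() is a zero-cost container_of() and the BUILD_BUG_ON(offsetof(...)) enforces that layout at compile time. The same embed-and-downcast idiom in isolation, with illustrative names:

        struct parent { int x; };

        struct child {
                struct parent base;     /* must remain the first member */
                int extra;
        };

        static inline struct child *to_child(struct parent *base)
        {
                /* Compile-time proof that 'base' sits at offset 0. */
                BUILD_BUG_ON(offsetof(struct child, base));
                return container_of(base, struct child, base);
        }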
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 54f00b350779..83e5e01fa9ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -141,7 +141,6 @@ struct drm_i915_gem_object {
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
*/
- unsigned long gt_ro:1;
unsigned int cache_level:3;
unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
@@ -268,7 +267,6 @@ struct drm_i915_gem_object {
union {
struct i915_gem_userptr {
uintptr_t ptr;
- unsigned read_only :1;
struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object;
@@ -337,26 +335,17 @@ __attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
- drm_gem_object_reference(&obj->base);
+ drm_gem_object_get(&obj->base);
return obj;
}
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
- __drm_gem_object_unreference(&obj->base);
+ __drm_gem_object_put(&obj->base);
}
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
reservation_object_lock(obj->resv, NULL);
@@ -367,6 +356,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
reservation_object_unlock(obj->resv);
}
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+ obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+ return obj->base.vma_node.readonly;
+}
+
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
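With gt_ro and userptr.read_only folded into the GEM object's drm_vma_offset_node, both the GPU binding paths and the CPU mmap fault path consult a single flag. A sketch of the intended consumer in a fault handler ('obj' and 'write' are assumed to come from the fault context):

        /* Sketch: a read-only object must never accept a writable fault. */
        if (unlikely(i915_gem_object_is_readonly(obj) && write))
                return VM_FAULT_SIGBUS;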
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 1036e8686916..90baf9086d0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
- so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+ so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
@@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
goto err_unpin;
}
- i915_vma_move_to_active(so.vma, rq, 0);
+ err = i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
i915_vma_unpin(so.vma);
err_vma:
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 5757fb7c4b5a..ea90d3a0d511 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -23,6 +23,7 @@
*/
#include <linux/oom.h>
+#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -172,7 +173,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
* we will free as much as we can and hope to get a second chance.
*/
if (flags & I915_SHRINK_ACTIVE)
- i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
trace_i915_gem_shrink(i915, target, flags);
i915_retire_requests(i915);
@@ -392,7 +395,8 @@ shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do {
- if (i915_gem_wait_for_idle(i915, 0) == 0 &&
+ if (i915_gem_wait_for_idle(i915,
+ 0, MAX_SCHEDULE_TIMEOUT) == 0 &&
shrinker_lock(i915, unlock))
break;
@@ -466,7 +470,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -480,7 +486,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
- &i915->ggtt.base.inactive_list, vm_link) {
+ &i915->ggtt.vm.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count;
@@ -526,3 +532,14 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
unregister_shrinker(&i915->mm.shrinker);
}
+
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ mutex_lock(mutex);
+ mutex_unlock(mutex);
+ fs_reclaim_release(GFP_KERNEL);
+}
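i915_gem_shrinker_taints_mutex() teaches lockdep, at init time, that the given mutex may be taken under memory reclaim: fs_reclaim_acquire() marks a reclaim context and the lock/unlock pair records the dependency, so a later mutex-held-while-allocating inversion is flagged immediately instead of waiting for real reclaim to recurse into the shrinker. A usage sketch for a freshly created lock (pairing assumed from this series' per-vm mutex):

        /* Sketch: prime lockdep right after creating a reclaim-visible lock. */
        mutex_init(&vm->mutex);
        i915_gem_shrinker_taints_mutex(&vm->mutex);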
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ad949cc30928..53440bf87650 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -254,6 +254,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
default:
MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+ /* fall through */
case GEN7_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
break;
@@ -343,6 +344,35 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
+static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ resource_size_t *base,
+ resource_size_t *size)
+{
+ u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);
+
+ DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+
+ *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
+
+ switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+ case GEN8_STOLEN_RESERVED_1M:
+ *size = 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_2M:
+ *size = 2 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_4M:
+ *size = 4 * 1024 * 1024;
+ break;
+ case GEN8_STOLEN_RESERVED_8M:
+ *size = 8 * 1024 * 1024;
+ break;
+ default:
+ *size = 8 * 1024 * 1024;
+ MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
+ }
+}
+
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
resource_size_t reserved_base, stolen_top;
@@ -399,7 +429,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
gen7_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
- default:
+ case 8:
+ case 9:
+ case 10:
if (IS_LP(dev_priv))
chv_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
@@ -407,6 +439,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
bdw_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
+ case 11:
+ default:
+ icl_get_stolen_reserved(dev_priv, &reserved_base,
+ &reserved_size);
+ break;
}
/*
@@ -642,7 +679,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret)
goto err;
- vma = i915_vma_instance(obj, &ggtt->base, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_pages;
@@ -653,7 +690,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
- ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
@@ -666,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 854bd51b9478..dcd6e230d16a 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
- if (!obj->userptr.read_only)
+ if (!i915_gem_object_is_readonly(obj))
flags |= FOLL_WRITE;
ret = -EFAULT;
@@ -643,7 +643,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (pvec) /* defer to worker if malloc fails */
pinned = __get_user_pages_fast(obj->userptr.ptr,
num_pages,
- !obj->userptr.read_only,
+ !i915_gem_object_is_readonly(obj),
pvec);
}
@@ -789,10 +789,15 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
- /* On almost all of the current hw, we cannot tell the GPU that a
- * page is readonly, so this is just a placeholder in the uAPI.
+ struct i915_hw_ppgtt *ppgtt;
+
+ /*
+ * On almost all of the older hw, we cannot tell the GPU that
+ * a page is readonly.
*/
- return -ENODEV;
+ ppgtt = dev_priv->kernel_context->ppgtt;
+ if (!ppgtt || !ppgtt->vm.has_read_only)
+ return -ENODEV;
}
obj = i915_gem_object_alloc(dev_priv);
@@ -806,7 +811,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
- obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+ if (args->flags & I915_USERPTR_READ_ONLY)
+ i915_gem_object_set_readonly(obj);
/* And keep a pointer to the current->mm for resolving the user pages
* at binding. This means that we need to hook into the mmu_notifier
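The net effect on the uAPI: I915_USERPTR_READ_ONLY stops returning -ENODEV outright and is instead gated on the kernel context's ppgtt advertising has_read_only. A hedged userspace sketch, assuming the drm_i915_gem_userptr layout from <drm/i915_drm.h> and libdrm's drmIoctl():

        struct drm_i915_gem_userptr arg = {
                .user_ptr = (uintptr_t)buf,     /* page-aligned buffer */
                .user_size = size,              /* whole pages */
                .flags = I915_USERPTR_READ_ONLY,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
                return -errno; /* -ENODEV: hw cannot enforce read-only */
        /* arg.handle now names a GEM object the GPU may only read. */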
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index df234dc23274..f7f2aa71d8d9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -31,6 +31,7 @@
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include <drm/drm_print.h>
+#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -335,21 +336,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
struct drm_i915_error_buffer *err,
int count)
{
- int i;
-
err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x [ ",
+ err_printf(m, " %08x_%08x %8u %02x %02x %02x",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
- err->write_domain);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- err_printf(m, "%02x ", err->rseqno[i]);
-
- err_printf(m, "] %02x", err->wseqno);
+ err->write_domain,
+ err->wseqno);
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
@@ -522,35 +518,12 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static int
-ascii85_encode_len(int len)
-{
- return DIV_ROUND_UP(len, 4);
-}
-
-static bool
-ascii85_encode(u32 in, char *out)
-{
- int i;
-
- if (in == 0)
- return false;
-
- out[5] = '\0';
- for (i = 5; i--; ) {
- out[i] = '!' + in % 85;
- in /= 85;
- }
-
- return true;
-}
-
static void print_error_obj(struct drm_i915_error_state_buf *m,
struct intel_engine_cs *engine,
const char *name,
struct drm_i915_error_object *obj)
{
- char out[6];
+ char out[ASCII85_BUFSZ];
int page;
if (!obj)
@@ -572,12 +545,8 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
len -= obj->unused;
len = ascii85_encode_len(len);
- for (i = 0; i < len; i++) {
- if (ascii85_encode(obj->pages[page][i], out))
- err_puts(m, out);
- else
- err_puts(m, "z");
- }
+ for (i = 0; i < len; i++)
+ err_puts(m, ascii85_encode(obj->pages[page][i], out));
}
err_puts(m, "\n");
}
@@ -973,8 +942,7 @@ i915_error_object_create(struct drm_i915_private *i915,
void __iomem *s;
int ret;
- ggtt->base.insert_page(&ggtt->base, dma, slot,
- I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
@@ -993,7 +961,7 @@ unwind:
out:
compress_fini(&compress, dst);
- ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
@@ -1022,13 +990,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- int i;
err->size = obj->base.size;
err->name = obj->base.name;
- for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
err->engine = __active_get_engine_id(&obj->frontbuffer_write);
@@ -1051,6 +1016,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
int i = 0;
list_for_each_entry(vma, head, vm_link) {
+ if (!vma->obj)
+ continue;
+
if (pinned_only && !i915_vma_is_pinned(vma))
continue;
@@ -1287,9 +1255,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
static void record_request(struct i915_request *request,
struct drm_i915_error_request *erq)
{
- erq->context = request->ctx->hw_id;
+ struct i915_gem_context *ctx = request->gem_context;
+
+ erq->context = ctx->hw_id;
erq->sched_attr = request->sched.attr;
- erq->ban_score = atomic_read(&request->ctx->ban_score);
+ erq->ban_score = atomic_read(&ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1267,7 @@ static void record_request(struct i915_request *request,
erq->tail = request->tail;
rcu_read_lock();
- erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
rcu_read_unlock();
}
@@ -1461,12 +1431,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
request = i915_gem_find_active_request(engine);
if (request) {
+ struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
- ee->vm = request->ctx->ppgtt ?
- &request->ctx->ppgtt->base : &ggtt->base;
+ ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
- record_context(&ee->context, request->ctx);
+ record_context(&ee->context, ctx);
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
@@ -1483,11 +1453,10 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->ctx =
i915_error_object_create(i915,
- to_intel_context(request->ctx,
- engine)->state);
+ request->hw_context->state);
error->simulated |=
- i915_gem_context_no_error_capture(request->ctx);
+ i915_gem_context_no_error_capture(ctx);
ee->rq_head = request->head;
ee->rq_post = request->postfix;
@@ -1563,17 +1532,17 @@ static void capture_active_buffers(struct i915_gpu_state *error)
static void capture_pinned_buffers(struct i915_gpu_state *error)
{
- struct i915_address_space *vm = &error->i915->ggtt.base;
+ struct i915_address_space *vm = &error->i915->ggtt.vm;
struct drm_i915_error_buffer *bo;
struct i915_vma *vma;
int count_inactive, count_active;
count_inactive = 0;
- list_for_each_entry(vma, &vm->active_list, vm_link)
+ list_for_each_entry(vma, &vm->inactive_list, vm_link)
count_inactive++;
count_active = 0;
- list_for_each_entry(vma, &vm->inactive_list, vm_link)
+ list_for_each_entry(vma, &vm->active_list, vm_link)
count_active++;
bo = NULL;
@@ -1667,7 +1636,16 @@ static void capture_reg_state(struct i915_gpu_state *error)
}
/* 4: Everything else */
- if (INTEL_GEN(dev_priv) >= 8) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ error->ier = I915_READ(GEN8_DE_MISC_IER);
+ error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
+ error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
+ error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
+ error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
+ error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+ error->ngtier = 6;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
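The hand-rolled ascii85 helpers above were lifted into <linux/ascii85.h>; the shared ascii85_encode() fills a caller-supplied char out[ASCII85_BUFSZ] and returns either the encoded word or the "z" shorthand for an all-zero word, which is why the z special-casing at the call site disappears. In isolation (names taken from the diff above):

        #include <linux/ascii85.h>

        /* Sketch: stream a buffer of u32 words as ascii85. */
        static void sketch_dump_a85(struct drm_i915_error_state_buf *m,
                                    const u32 *words, int count)
        {
                char out[ASCII85_BUFSZ];
                int i;

                for (i = 0; i < count; i++)
                        err_puts(m, ascii85_encode(words[i], out));
        }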
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index dac0f8c4c1cf..f893a4e8b783 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -58,7 +58,7 @@ struct i915_gpu_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
- u32 gtier[4], ngtier;
+ u32 gtier[6], ngtier;
u32 ccid;
u32 derrmr;
u32 forcewake;
@@ -177,7 +177,7 @@ struct i915_gpu_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 rseqno[I915_NUM_ENGINES], wseqno;
+ u32 wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c16cb025755e..90628a47ae17 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -115,6 +115,22 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
+static const u32 hpd_gen11[HPD_NUM_PINS] = {
+ [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
+ [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
+ [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
+ [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+};
+
+static const u32 hpd_icp[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
+ [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
+ [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
+ [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
+ [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
+ [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
+};
+
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
@@ -247,9 +263,9 @@ static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit);
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int bit)
+static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+ const unsigned int bank,
+ const unsigned int bit)
{
void __iomem * const regs = i915->regs;
u32 dw;
@@ -1138,21 +1154,21 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
+ const u32 seqno = intel_engine_get_seqno(engine);
struct i915_request *rq = NULL;
+ struct task_struct *tsk = NULL;
struct intel_wait *wait;
- if (!engine->breadcrumbs.irq_armed)
+ if (unlikely(!engine->breadcrumbs.irq_armed))
return;
- atomic_inc(&engine->irq_count);
- set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ rcu_read_lock();
spin_lock(&engine->breadcrumbs.irq_lock);
wait = engine->breadcrumbs.irq_wait;
if (wait) {
- bool wakeup = engine->irq_seqno_barrier;
-
- /* We use a callback from the dma-fence to submit
+ /*
+ * We use a callback from the dma-fence to submit
* requests after waiting on our own requests. To
* ensure minimum delay in queuing the next request to
* hardware, signal the fence now rather than wait for
@@ -1163,19 +1179,26 @@ static void notify_ring(struct intel_engine_cs *engine)
* and to handle coalescing of multiple seqno updates
* and many waiters.
*/
- if (i915_seqno_passed(intel_engine_get_seqno(engine),
- wait->seqno)) {
+ if (i915_seqno_passed(seqno, wait->seqno)) {
struct i915_request *waiter = wait->request;
- wakeup = true;
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ if (waiter &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&waiter->fence.flags) &&
intel_wait_check_request(wait, waiter))
rq = i915_request_get(waiter);
+
+ tsk = wait->tsk;
+ } else {
+ if (engine->irq_seqno_barrier &&
+ i915_seqno_passed(seqno, wait->seqno - 1)) {
+ set_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted);
+ tsk = wait->tsk;
+ }
}
- if (wakeup)
- wake_up_process(wait->tsk);
+ engine->breadcrumbs.irq_count++;
} else {
if (engine->breadcrumbs.irq_armed)
__intel_engine_disarm_breadcrumbs(engine);
@@ -1183,11 +1206,19 @@ static void notify_ring(struct intel_engine_cs *engine)
spin_unlock(&engine->breadcrumbs.irq_lock);
if (rq) {
- dma_fence_signal(&rq->fence);
+ spin_lock(&rq->lock);
+ dma_fence_signal_locked(&rq->fence);
GEM_BUG_ON(!i915_request_completed(rq));
+ spin_unlock(&rq->lock);
+
i915_request_put(rq);
}
+ if (tsk && tsk->state & TASK_NORMAL)
+ wake_up_process(tsk);
+
+ rcu_read_unlock();
+
trace_intel_engine_notify(engine, wait);
}
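The notify_ring() rework hinges on two details: the waiter's task_struct is pinned by rcu_read_lock() (task structs are RCU-freed), so it stays valid after irq_lock is dropped, and dma_fence_signal_locked() runs under rq->lock as its contract requires. The wakeup pattern in isolation ('b' stands in for &engine->breadcrumbs):

        struct task_struct *tsk = NULL;

        rcu_read_lock();
        spin_lock(&b->irq_lock);
        if (b->irq_wait)
                tsk = b->irq_wait->tsk; /* pinned by RCU, no refcount taken */
        spin_unlock(&b->irq_lock);

        /* Skip the wakeup if the waiter is already runnable. */
        if (tsk && tsk->state & TASK_NORMAL)
                wake_up_process(tsk);
        rcu_read_unlock();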
@@ -1234,9 +1265,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
c0 = max(render, media);
c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
- if (c0 > time * rps->up_threshold)
+ if (c0 > time * rps->power.up_threshold)
events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * rps->down_threshold)
+ else if (c0 < time * rps->power.down_threshold)
events = GEN6_PM_RP_DOWN_THRESHOLD;
}
@@ -1462,14 +1493,10 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
bool tasklet = false;
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
- if (READ_ONCE(engine->execlists.active))
- tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted);
- }
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
if (iir & GT_RENDER_USER_INTERRUPT) {
notify_ring(engine);
@@ -1477,7 +1504,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
}
if (tasklet)
- tasklet_hi_schedule(&execlists->tasklet);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
@@ -1549,78 +1576,122 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
}
}
-static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
+static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_C:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_D:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_E:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_F:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
+ default:
+ return false;
+ }
+}
+
+static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
return val & PORTA_HOTPLUG_LONG_DETECT;
- case PORT_B:
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
+static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
+ return val & ICP_DDIA_HPD_LONG_DETECT;
+ case HPD_PORT_B:
+ return val & ICP_DDIB_HPD_LONG_DETECT;
+ default:
+ return false;
+ }
+}
+
+static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_C:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_D:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_E:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_F:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
+ default:
+ return false;
+ }
+}
+
+static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_E:
+ switch (pin) {
+ case HPD_PORT_E:
return val & PORTE_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool spt_port_hotplug_long_detect(enum port port, u32 val)
+static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_A:
return val & PORTA_HOTPLUG_LONG_DETECT;
- case PORT_B:
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
+static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_A:
+ switch (pin) {
+ case HPD_PORT_A:
return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool pch_port_hotplug_long_detect(enum port port, u32 val)
+static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_B:
+ switch (pin) {
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
-static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
+static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
- switch (port) {
- case PORT_B:
+ switch (pin) {
+ case HPD_PORT_B:
return val & PORTB_HOTPLUG_INT_LONG_PULSE;
- case PORT_C:
+ case HPD_PORT_C:
return val & PORTC_HOTPLUG_INT_LONG_PULSE;
- case PORT_D:
+ case HPD_PORT_D:
return val & PORTD_HOTPLUG_INT_LONG_PULSE;
default:
return false;
@@ -1638,27 +1709,22 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
- bool long_pulse_detect(enum port port, u32 val))
+ bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
- enum port port;
- int i;
+ enum hpd_pin pin;
- for_each_hpd_pin(i) {
- if ((hpd[i] & hotplug_trigger) == 0)
+ for_each_hpd_pin(pin) {
+ if ((hpd[pin] & hotplug_trigger) == 0)
continue;
- *pin_mask |= BIT(i);
+ *pin_mask |= BIT(pin);
- port = intel_hpd_pin_to_port(dev_priv, i);
- if (port == PORT_NONE)
- continue;
-
- if (long_pulse_detect(port, dig_hotplug_reg))
- *long_mask |= BIT(i);
+ if (long_pulse_detect(pin, dig_hotplug_reg))
+ *long_mask |= BIT(pin);
}
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
@@ -1680,69 +1746,34 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
uint32_t crc4)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_pipe_crc_entry *entry;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct drm_driver *driver = dev_priv->drm.driver;
uint32_t crcs[5];
- int head, tail;
spin_lock(&pipe_crc->lock);
- if (pipe_crc->source && !crtc->base.crc.opened) {
- if (!pipe_crc->entries) {
- spin_unlock(&pipe_crc->lock);
- DRM_DEBUG_KMS("spurious interrupt\n");
- return;
- }
-
- head = pipe_crc->head;
- tail = pipe_crc->tail;
-
- if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
- spin_unlock(&pipe_crc->lock);
- DRM_ERROR("CRC buffer overflowing\n");
- return;
- }
-
- entry = &pipe_crc->entries[head];
-
- entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
- entry->crc[0] = crc0;
- entry->crc[1] = crc1;
- entry->crc[2] = crc2;
- entry->crc[3] = crc3;
- entry->crc[4] = crc4;
-
- head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- pipe_crc->head = head;
-
- spin_unlock(&pipe_crc->lock);
-
- wake_up_interruptible(&pipe_crc->wq);
- } else {
- /*
- * For some not yet identified reason, the first CRC is
- * bonkers. So let's just wait for the next vblank and read
- * out the buggy result.
- *
- * On GEN8+ sometimes the second CRC is bonkers as well, so
- * don't trust that one either.
- */
- if (pipe_crc->skipped <= 0 ||
- (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
- pipe_crc->skipped++;
- spin_unlock(&pipe_crc->lock);
- return;
- }
+ /*
+ * For some not yet identified reason, the first CRC is
+ * bonkers. So let's just wait for the next vblank and read
+ * out the buggy result.
+ *
+ * On GEN8+ sometimes the second CRC is bonkers as well, so
+ * don't trust that one either.
+ */
+ if (pipe_crc->skipped <= 0 ||
+ (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
- crcs[0] = crc0;
- crcs[1] = crc1;
- crcs[2] = crc2;
- crcs[3] = crc3;
- crcs[4] = crc4;
- drm_crtc_add_crc_entry(&crtc->base, true,
- drm_crtc_accurate_vblank_count(&crtc->base),
- crcs);
+ return;
}
+ spin_unlock(&pipe_crc->lock);
+
+ crcs[0] = crc0;
+ crcs[1] = crc1;
+ crcs[2] = crc2;
+ crcs[3] = crc3;
+ crcs[4] = crc4;
+ drm_crtc_add_crc_entry(&crtc->base, true,
+ drm_crtc_accurate_vblank_count(&crtc->base),
+ crcs);
}
#else
static inline void
@@ -2136,7 +2167,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
- POSTING_READ(VLV_MASTER_IER);
if (gt_iir)
snb_gt_irq_handler(dev_priv, gt_iir);
@@ -2221,7 +2251,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
@@ -2390,6 +2419,43 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+{
+ u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ u32 pin_mask = 0, long_mask = 0;
+
+ if (ddi_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
+ I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ ddi_hotplug_trigger,
+ dig_hotplug_reg, hpd_icp,
+ icp_ddi_port_hotplug_long_detect);
+ }
+
+ if (tc_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
+ I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ tc_hotplug_trigger,
+ dig_hotplug_reg, hpd_icp,
+ icp_tc_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+
+ if (pch_iir & SDE_GMBUS_ICP)
+ gmbus_irq_handler(dev_priv);
+}
+
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
@@ -2553,7 +2619,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
@@ -2563,7 +2628,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (!HAS_PCH_NOP(dev_priv)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
- POSTING_READ(SDEIER);
}
/* Find, clear, then process each source of interrupt */
@@ -2598,11 +2662,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
}
I915_WRITE(DEIER, de_ier);
- POSTING_READ(DEIER);
- if (!HAS_PCH_NOP(dev_priv)) {
+ if (!HAS_PCH_NOP(dev_priv))
I915_WRITE(SDEIER, sde_ier);
- POSTING_READ(SDEIER);
- }
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
enable_rpm_wakeref_asserts(dev_priv);
@@ -2626,6 +2687,40 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
+static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+{
+ u32 pin_mask = 0, long_mask = 0;
+ u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
+ u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
+
+ if (trigger_tc) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
+ dig_hotplug_reg, hpd_gen11,
+ gen11_port_hotplug_long_detect);
+ }
+
+ if (trigger_tbt) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
+ dig_hotplug_reg, hpd_gen11,
+ gen11_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ else
+ DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2661,6 +2756,17 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
}
+ if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ iir = I915_READ(GEN11_DE_HPD_IIR);
+ if (iir) {
+ I915_WRITE(GEN11_DE_HPD_IIR, iir);
+ ret = IRQ_HANDLED;
+ gen11_hpd_irq_handler(dev_priv, iir);
+ } else {
+ DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
+ }
+ }
+
if (master_ctl & GEN8_DE_PORT_IRQ) {
iir = I915_READ(GEN8_DE_PORT_IIR);
if (iir) {
@@ -2676,7 +2782,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ tmp_mask |= ICL_AUX_CHANNEL_E;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 11)
tmp_mask |= CNL_AUX_CHANNEL_F;
if (iir & tmp_mask) {
@@ -2760,8 +2870,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_ICP(dev_priv))
+ icp_irq_handler(dev_priv, iir);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) ||
+ HAS_PCH_CNP(dev_priv))
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@@ -2978,11 +3091,44 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
spin_unlock(&i915->irq_lock);
}
+static void
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
+ u32 *iir)
+{
+ void __iomem * const regs = dev_priv->regs;
+
+ if (!(master_ctl & GEN11_GU_MISC_IRQ))
+ return;
+
+ *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+ if (likely(*iir))
+ raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+}
+
+static void
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
+ const u32 master_ctl, const u32 iir)
+{
+ if (!(master_ctl & GEN11_GU_MISC_IRQ))
+ return;
+
+ if (unlikely(!iir)) {
+ DRM_ERROR("GU_MISC iir blank!\n");
+ return;
+ }
+
+ if (iir & GEN11_GU_MISC_GSE)
+ intel_opregion_asle_intr(dev_priv);
+ else
+ DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
+}
+
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
void __iomem * const regs = i915->regs;
u32 master_ctl;
+ u32 gu_misc_iir;
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
@@ -3011,9 +3157,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(i915);
}
+ gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+
/* Acknowledge and enable interrupts. */
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+ gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+
return IRQ_HANDLED;
}
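Ordering appears to be the point of the GU_MISC split: the IIR is read and cleared while the master interrupt is still disabled, the master is re-armed, and only then is the (potentially slow) GSE/opregion work run, so an edge raised during handling triggers a fresh interrupt rather than being swallowed. Condensed:

        /* Sketch of the ack-early / handle-late split used above. */
        gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);   /* clear IIR */
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ,
                      GEN11_MASTER_IRQ | master_ctl);            /* re-arm */
        gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);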
@@ -3089,7 +3239,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
*/
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
}
}
@@ -3500,7 +3650,12 @@ static void gen11_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(GEN8_DE_PORT_);
GEN3_IRQ_RESET(GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(GEN11_DE_HPD_);
+ GEN3_IRQ_RESET(GEN11_GU_MISC_);
GEN3_IRQ_RESET(GEN8_PCU_);
+
+ if (HAS_PCH_ICP(dev_priv))
+ GEN3_IRQ_RESET(SDE);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3617,6 +3772,73 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
+static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug;
+
+ hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+ hotplug |= ICP_DDIA_HPD_ENABLE |
+ ICP_DDIB_HPD_ENABLE;
+ I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+
+ hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
+ ICP_TC_HPD_ENABLE(PORT_TC2) |
+ ICP_TC_HPD_ENABLE(PORT_TC3) |
+ ICP_TC_HPD_ENABLE(PORT_TC4);
+ I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+}
+
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ icp_hpd_detection_setup(dev_priv);
+}
+
+static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug;
+
+ hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+
+ hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+}
+
+static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+ u32 val;
+
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+ hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
+
+ val = I915_READ(GEN11_DE_HPD_IMR);
+ val &= ~hotplug_irqs;
+ I915_WRITE(GEN11_DE_HPD_IMR, val);
+ POSTING_READ(GEN11_DE_HPD_IMR);
+
+ gen11_hpd_detection_setup(dev_priv);
+
+ if (HAS_PCH_ICP(dev_priv))
+ icp_hpd_irq_setup(dev_priv);
+}
+
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 val, hotplug;
@@ -3943,9 +4165,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
- u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
+ u32 de_misc_masked = GEN8_DE_EDP_PSR;
enum pipe pipe;
+ if (INTEL_GEN(dev_priv) <= 10)
+ de_misc_masked |= GEN8_DE_MISC_GSE;
+
if (INTEL_GEN(dev_priv) >= 9) {
de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
@@ -3956,7 +4181,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ de_port_masked |= ICL_AUX_CHANNEL_E;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
de_port_masked |= CNL_AUX_CHANNEL_F;
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3984,10 +4212,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11) {
+ u32 de_hpd_masked = 0;
+ u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
+ GEN11_DE_TBT_HOTPLUG_MASK;
+
+ GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+ gen11_hpd_detection_setup(dev_priv);
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_hpd_detection_setup(dev_priv);
- else if (IS_BROADWELL(dev_priv))
+ } else if (IS_BROADWELL(dev_priv)) {
ilk_hpd_detection_setup(dev_priv);
+ }
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4036,13 +4272,34 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}
+static void icp_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 mask = SDE_GMBUS_ICP;
+
+ WARN_ON(I915_READ(SDEIER) != 0);
+ I915_WRITE(SDEIER, 0xffffffff);
+ POSTING_READ(SDEIER);
+
+ gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ I915_WRITE(SDEIMR, ~mask);
+
+ icp_hpd_detection_setup(dev_priv);
+}
+
static int gen11_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gu_misc_masked = GEN11_GU_MISC_GSE;
+
+ if (HAS_PCH_ICP(dev_priv))
+ icp_irq_postinstall(dev);
gen11_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
+ GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
@@ -4090,11 +4347,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
@@ -4109,6 +4368,81 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
+static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
+ u16 *eir, u16 *eir_stuck)
+{
+ u16 emr;
+
+ *eir = I915_READ16(EIR);
+
+ if (*eir)
+ I915_WRITE16(EIR, *eir);
+
+ *eir_stuck = I915_READ16(EIR);
+ if (*eir_stuck == 0)
+ return;
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits. Otherwise the edge triggered
+ * IIR on i965/g4x wouldn't notice that an interrupt
+ * is still pending. Also some EIR bits can't be
+ * cleared except by handling the underlying error
+ * (or by a GPU reset) so we mask any bit that
+ * remains set.
+ */
+ emr = I915_READ16(EMR);
+ I915_WRITE16(EMR, 0xffff);
+ I915_WRITE16(EMR, emr | *eir_stuck);
+}
+
+static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
+ u16 eir, u16 eir_stuck)
+{
+ DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
+
+ if (eir_stuck)
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
+}
+
+static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
+ u32 *eir, u32 *eir_stuck)
+{
+ u32 emr;
+
+ *eir = I915_READ(EIR);
+
+ I915_WRITE(EIR, *eir);
+
+ *eir_stuck = I915_READ(EIR);
+ if (*eir_stuck == 0)
+ return;
+
+ /*
+ * Toggle all EMR bits to make sure we get an edge
+ * in the ISR master error bit if we don't clear
+ * all the EIR bits. Otherwise the edge triggered
+ * IIR on i965/g4x wouldn't notice that an interrupt
+ * is still pending. Also some EIR bits can't be
+ * cleared except by handling the underlying error
+ * (or by a GPU reset) so we mask any bit that
+ * remains set.
+ */
+ emr = I915_READ(EMR);
+ I915_WRITE(EMR, 0xffffffff);
+ I915_WRITE(EMR, emr | *eir_stuck);
+}
+
+static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
+ u32 eir, u32 eir_stuck)
+{
+ DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
+
+ if (eir_stuck)
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
+}
+
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -4123,6 +4457,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u16 eir = 0, eir_stuck = 0;
u16 iir;
iir = I915_READ16(IIR);
@@ -4135,13 +4470,16 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE16(IIR, iir);
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
@@ -4179,12 +4517,14 @@ static int i915_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask =
~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev_priv)) {
@@ -4222,6 +4562,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 eir = 0, eir_stuck = 0;
u32 hotplug_status = 0;
u32 iir;
@@ -4239,13 +4580,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4299,14 +4643,14 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_MASTER_ERROR_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
if (IS_G4X(dev_priv))
@@ -4366,6 +4710,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 eir = 0, eir_stuck = 0;
u32 hotplug_status = 0;
u32 iir;
@@ -4382,6 +4727,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
* signalled in iir */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
@@ -4390,8 +4738,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev_priv->engine[VCS]);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ if (iir & I915_MASTER_ERROR_INTERRUPT)
+ i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4506,7 +4854,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen11_irq_reset;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
} else if (INTEL_GEN(dev_priv) >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 66ea3552c63e..295e981e4a39 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -44,10 +44,6 @@ i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
-i915_param_named_unsafe(panel_ignore_lid, int, 0600,
- "Override lid status (0=autodetect, 1=autodetect disabled [default], "
- "-1=force lid closed, -2=force lid open)");
-
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
@@ -92,7 +88,7 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
- "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+ "(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
i915_param_named_unsafe(alpha_support, bool, 0400,
@@ -130,9 +126,6 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
-i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
- "Enable command parsing (true=enabled [default], false=disabled)");
-
i915_param_named(mmio_debug, int, 0600,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6684025b7af8..6c4d4a21474b 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -36,7 +36,6 @@ struct drm_printer;
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
- param(int, panel_ignore_lid, 1) \
param(int, lvds_channel_mode, 0) \
param(int, panel_use_ssc, -1) \
param(int, vbt_sdvo_panel_type, -1) \
@@ -58,7 +57,6 @@ struct drm_printer;
param(unsigned int, inject_load_failure, 0) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
- param(bool, enable_cmd_parser, true) \
param(bool, enable_hangcheck, true) \
param(bool, fastboot, false) \
param(bool, prefault_disable, false) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 4364922e935d..6a4d1388ad2d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -340,7 +340,6 @@ static const struct intel_device_info intel_valleyview_info = {
GEN(7),
.is_lp = 1,
.num_pipes = 2,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_gmch_display = 1,
@@ -433,7 +432,6 @@ static const struct intel_device_info intel_cherryview_info = {
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
.has_rc6 = 1,
@@ -659,12 +657,15 @@ static const struct pci_device_id pciidlist[] = {
INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+ INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
- INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+ INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
{0, 0, 0}
@@ -673,10 +674,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static void i915_pci_remove(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_device *dev;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev) /* driver load aborted, nothing to clean up */
+ return;
i915_driver_unload(dev);
drm_dev_put(dev);
+
+ pci_set_drvdata(pdev, NULL);
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -711,6 +718,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
+ if (i915_inject_load_failure()) {
+ i915_pci_remove(pdev);
+ return -ENODEV;
+ }
+
err = i915_live_selftests(pdev);
if (err) {
i915_pci_remove(pdev);
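
The drvdata guard above follows a common PCI driver idiom; a hedged sketch of the pattern (kernel-flavoured illustration, not the driver's exact code):

	#include <linux/pci.h>

	static void example_remove(struct pci_dev *pdev)
	{
		struct drm_device *dev = pci_get_drvdata(pdev);

		if (!dev)	/* probe aborted before drvdata was set, or cleared it */
			return;

		/* ... normal teardown ... */

		pci_set_drvdata(pdev, NULL);	/* make a repeated remove a no-op */
	}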
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 019bd2d073ad..6bf10952c724 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -315,7 +315,7 @@ static u32 i915_oa_max_sample_rate = 100000;
* code assumes all reports have a power-of-two size and ~(size - 1) can
* be used as a mask to align the OA tail pointer.
*/
-static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A13] = { 0, 64 },
[I915_OA_FORMAT_A29] = { 1, 128 },
[I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
@@ -326,7 +326,7 @@ static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
};
-static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A12] = { 0, 64 },
[I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
@@ -737,12 +737,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
continue;
}
- /*
- * XXX: Just keep the lower 21 bits for now since I'm not
- * entirely sure if the HW touches any of the higher bits in
- * this field
- */
- ctx_id = report32[2] & 0x1fffff;
+ ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
/*
* Squash whatever is in the CTX_ID field if it's marked as
@@ -1203,6 +1198,33 @@ static int i915_oa_read(struct i915_perf_stream *stream,
return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}
+static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx)
+{
+ struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_context *ce;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(&i915->drm);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * As the ID is the gtt offset of the context's vma,
+ * we pin the vma to ensure the ID remains fixed.
+ *
+ * NB: implied RCS engine...
+ */
+ ce = intel_context_pin(ctx, engine);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ce))
+ return ce;
+
+ i915->perf.oa.pinned_ctx = ce;
+
+ return ce;
+}
+
/**
* oa_get_render_ctx_id - determine and hold ctx hw id
* @stream: An i915-perf stream opened for OA metrics
@@ -1215,40 +1237,76 @@ static int i915_oa_read(struct i915_perf_stream *stream,
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct drm_i915_private *i915 = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
- struct intel_ring *ring;
- int ret;
-
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
+ ce = oa_pin_context(i915, stream->ctx);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+ switch (INTEL_GEN(i915)) {
+ case 7: {
/*
- * As the ID is the gtt offset of the context's vma we
- * pin the vma to ensure the ID remains fixed.
- *
- * NB: implied RCS engine...
+ * On Haswell we don't do any post processing of the reports
+ * and don't need to use the mask.
*/
- ring = intel_context_pin(stream->ctx, engine);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
+ i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
+ i915->perf.oa.specific_ctx_id_mask = 0;
+ break;
+ }
+ case 8:
+ case 9:
+ case 10:
+ if (USES_GUC_SUBMISSION(i915)) {
+ /*
+ * When using GuC, the context descriptor we write in
+ * i915 is read by GuC and rewritten before it's
+ * actually written into the hardware. The LRCA is
+ * what is put into the context id field of the
+ * context descriptor by GuC. Because it's aligned to
+ * a page, the lower 12 bits are always 0 and
+ * dropped by GuC. They won't be part of the context
+ * ID in the OA reports, so squash those lower bits.
+ */
+ i915->perf.oa.specific_ctx_id =
+ lower_32_bits(ce->lrc_desc) >> 12;
- /*
- * Explicitly track the ID (instead of calling
- * i915_ggtt_offset() on the fly) considering the difference
- * with gen8+ and execlists
- */
- dev_priv->perf.oa.specific_ctx_id =
- i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
+ /*
+ * GuC uses the top bit to signal proxy submission, so
+ * ignore that bit.
+ */
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+ } else {
+ i915->perf.oa.specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ i915->perf.oa.specific_ctx_id =
+ upper_32_bits(ce->lrc_desc);
+ i915->perf.oa.specific_ctx_id &=
+ i915->perf.oa.specific_ctx_id_mask;
+ }
+ break;
+
+ case 11: {
+ i915->perf.oa.specific_ctx_id_mask =
+ ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
+ ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+ ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
+ i915->perf.oa.specific_ctx_id &=
+ i915->perf.oa.specific_ctx_id_mask;
+ break;
+ }
+
+ default:
+ MISSING_CASE(INTEL_GEN(i915));
}
+ DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ i915->perf.oa.specific_ctx_id,
+ i915->perf.oa.specific_ctx_id_mask);
+
return 0;
}
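
A worked example of the id/mask pairs this switch produces (plain C, no kernel dependencies; assumes GEN8_CTX_ID_WIDTH == 21 as defined elsewhere in the driver, and the lrc_desc value is made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t lrc_desc = 0x0000123456789000ULL;	/* hypothetical descriptor */
		unsigned int id, mask, guc_id, guc_mask;

		/* Execlists (gen8-10): the ID lives in the upper 32 bits. */
		mask = (1U << 21) - 1;				/* GEN8_CTX_ID_WIDTH == 21 */
		id   = (unsigned int)(lrc_desc >> 32) & mask;

		/* GuC: the page-aligned LRCA, minus its zero low 12 bits; the
		 * top bit (proxy submission flag) is excluded from the mask. */
		guc_mask = (1U << 20) - 1;
		guc_id   = ((unsigned int)lrc_desc >> 12) & guc_mask;

		printf("execlists id=0x%x mask=0x%x, guc id=0x%x mask=0x%x\n",
		       id, mask, guc_id, guc_mask);
		return 0;
	}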
@@ -1262,17 +1320,15 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_context *ce;
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- } else {
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+ dev_priv->perf.oa.specific_ctx_id_mask = 0;
+ ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+ if (ce) {
mutex_lock(&dev_priv->drm.struct_mutex);
-
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- intel_context_unpin(stream->ctx, engine);
-
+ intel_context_unpin(ce);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
}
@@ -1780,7 +1836,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* So far the best way to work around this issue seems to be draining
* the GPU from any submitted work.
*/
- ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
+ ret = i915_gem_wait_for_idle(dev_priv,
+ wait_flags,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
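
The extra argument makes the idle wait explicitly bounded; MAX_SCHEDULE_TIMEOUT preserves the old wait-forever behaviour. A hedged fragment showing a caller that prefers a finite bound (the 100 ms value and the jiffies conversion are assumptions, not from the patch):

	ret = i915_gem_wait_for_idle(dev_priv, wait_flags,
				     msecs_to_jiffies(100)); /* hypothetical finite bound */
	if (ret)
		goto out;	/* give up rather than block indefinitely */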
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index b50b74053664..d6c8f8fdfda5 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -128,6 +128,7 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
i915->pmu.timer_enabled = true;
+ i915->pmu.timer_last = ktime_get();
hrtimer_start_range_ns(&i915->pmu.timer,
ns_to_ktime(PERIOD), 0,
HRTIMER_MODE_REL_PINNED);
@@ -156,12 +157,13 @@ static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
}
static void
-update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+add_sample(struct i915_pmu_sample *sample, u32 val)
{
- sample->cur += mul_u32_u32(val, unit);
+ sample->cur += val;
}
-static void engines_sample(struct drm_i915_private *dev_priv)
+static void
+engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -183,8 +185,9 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = !i915_seqno_passed(current_seqno, last_seqno);
- update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
- PERIOD, val);
+ if (val)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+ period_ns);
if (val && (engine->pmu.enable &
(BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
@@ -195,11 +198,13 @@ static void engines_sample(struct drm_i915_private *dev_priv)
val = 0;
}
- update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
- PERIOD, !!(val & RING_WAIT));
+ if (val & RING_WAIT)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+ period_ns);
- update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
- PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+ if (val & RING_WAIT_SEMAPHORE)
+ add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+ period_ns);
}
if (fw)
@@ -208,7 +213,14 @@ static void engines_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
-static void frequency_sample(struct drm_i915_private *dev_priv)
+static void
+add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
+{
+ sample->cur += mul_u32_u32(val, mul);
+}
+
+static void
+frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
@@ -222,15 +234,17 @@ static void frequency_sample(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
- 1, intel_gpu_freq(dev_priv, val));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+ intel_gpu_freq(dev_priv, val),
+ period_ns / 1000);
}
if (dev_priv->pmu.enable &
config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq));
+ add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+ intel_gpu_freq(dev_priv,
+ dev_priv->gt_pm.rps.cur_freq),
+ period_ns / 1000);
}
}
@@ -238,14 +252,27 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
+ unsigned int period_ns;
+ ktime_t now;
if (!READ_ONCE(i915->pmu.timer_enabled))
return HRTIMER_NORESTART;
- engines_sample(i915);
- frequency_sample(i915);
+ now = ktime_get();
+ period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
+ i915->pmu.timer_last = now;
+
+ /*
+ * Strictly speaking, the passed-in period may not be 100% accurate for
+ * all internal calculations, since some amount of time can be spent on
+ * grabbing the forcewake. However, the potential error from timer
+ * callback delay greatly dominates this, so we keep it simple.
+ */
+ engines_sample(i915, period_ns);
+ frequency_sample(i915, period_ns);
+
+ hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
- hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
return HRTIMER_RESTART;
}
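
The switch from a fixed PERIOD multiplier to the measured period_ns makes the counters elapsed-time based: busy counters accumulate nanoseconds directly, while frequency counters accumulate MHz-microseconds and are divided by USEC_PER_SEC at read time. A standalone sketch of the arithmetic (plain C, illustrative values only):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t busy_ns = 0, freq_cur = 0, total_us = 0;
		unsigned int period_ns = 5200000; /* measured tick: 5.2 ms, not a nominal 5 ms */

		/* Tick 1: engine busy, GPU sampled at 450 MHz. */
		busy_ns  += period_ns;
		freq_cur += 450ULL * (period_ns / 1000);
		total_us += period_ns / 1000;

		/* Tick 2: engine busy, GPU sampled at 600 MHz. */
		busy_ns  += period_ns;
		freq_cur += 600ULL * (period_ns / 1000);
		total_us += period_ns / 1000;

		printf("busy=%llu ns, time-weighted avg=%llu MHz\n",
		       (unsigned long long)busy_ns,
		       (unsigned long long)(freq_cur / total_us));	/* 525 MHz */
		return 0;
	}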
@@ -520,12 +547,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
case I915_PMU_ACTUAL_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
- FREQUENCY);
+ USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = count_interrupts(i915);
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 2ba735299f7c..7f164ca3db12 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -65,6 +65,14 @@ struct i915_pmu {
* event types.
*/
u64 enable;
+
+ /**
+ * @timer_last:
+ *
+ * Timestamp of the previous timer invocation.
+ */
+ ktime_t timer_last;
+
/**
* @enable_count: Reference counts for the enabled events.
*
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 195203f298df..eeaa3d506d95 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -54,6 +54,7 @@ enum vgt_g2v_type {
*/
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
+#define VGT_CAPS_HUGE_GTT BIT(4)
struct vgt_if {
u64 magic; /* VGT_MAGIC */
@@ -93,7 +94,10 @@ struct vgt_if {
u32 rsv5[4];
u32 g2v_notify;
- u32 rsv6[7];
+ u32 rsv6[5];
+
+ u32 cursor_x_hot;
+ u32 cursor_y_hot;
struct {
u32 lo;
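
Shrinking rsv6[] by exactly the number of new fields keeps every later member of the PVINFO page at its old offset, which matters because the layout is an ABI shared with the hypervisor. A compile-time sketch of the invariant (illustrative check, not part of the patch):

	struct layout_before { unsigned int g2v_notify; unsigned int rsv6[7]; };
	struct layout_after  { unsigned int g2v_notify; unsigned int rsv6[5];
			       unsigned int cursor_x_hot, cursor_y_hot; };
	_Static_assert(sizeof(struct layout_before) == sizeof(struct layout_after),
		       "PVINFO layout preserved");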
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7720569f2024..91e7483228e1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -139,23 +139,40 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}
+/*
+ * Given the first two numbers __a and __b of arbitrarily many evenly spaced
+ * numbers, pick the 0-based __index'th value.
+ *
+ * Always prefer this over _PICK() if the numbers are evenly spaced.
+ */
+#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
+
+/*
+ * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
+ *
+ * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
+ */
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
-#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+/*
+ * Named helper wrappers around _PICK_EVEN() and _PICK().
+ */
+#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PIPE(plane, a, b)
+#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a)))
+#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
+#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
@@ -164,7 +181,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
BUILD_BUG_ON_MSG((value) & ~(mask), \
"Incorrect value for mask"); \
- (mask) << 16 | (value); })
+ __MASKED_FIELD(mask, value); })
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
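
Two worked examples of the macro arithmetic above (plain C, no kernel dependencies; the 0x70180/0x71180 plane-control offsets are used purely for illustration):

	#include <stdio.h>

	#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
	#define __MASKED_FIELD(mask, value)   ((mask) << 16 | (value))

	int main(void)
	{
		/* Evenly spaced instances: index 2 lands one stride past __b. */
		printf("0x%x\n", _PICK_EVEN(2, 0x70180, 0x71180));	/* 0x72180 */

		/* Masked write: enable bit 4 -> mask in the high half, value low. */
		printf("0x%08x\n", __MASKED_FIELD(1 << 4, 1 << 4));	/* 0x00100010 */
		return 0;
	}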
@@ -270,19 +287,19 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
-#define ILK_GRDOM_FULL (0<<1)
-#define ILK_GRDOM_RENDER (1<<1)
-#define ILK_GRDOM_MEDIA (3<<1)
-#define ILK_GRDOM_MASK (3<<1)
-#define ILK_GRDOM_RESET_ENABLE (1<<0)
+#define ILK_GRDOM_FULL (0 << 1)
+#define ILK_GRDOM_RENDER (1 << 1)
+#define ILK_GRDOM_MEDIA (3 << 1)
+#define ILK_GRDOM_MASK (3 << 1)
+#define ILK_GRDOM_RESET_ENABLE (1 << 0)
#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
-#define GEN6_MBC_SNPCR_MASK (3<<21)
-#define GEN6_MBC_SNPCR_MAX (0<<21)
-#define GEN6_MBC_SNPCR_MED (1<<21)
-#define GEN6_MBC_SNPCR_LOW (2<<21)
-#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+#define GEN6_MBC_SNPCR_MASK (3 << 21)
+#define GEN6_MBC_SNPCR_MAX (0 << 21)
+#define GEN6_MBC_SNPCR_MED (1 << 21)
+#define GEN6_MBC_SNPCR_LOW (2 << 21)
+#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */
#define VLV_G3DCTL _MMIO(0x9024)
#define VLV_GSCKGCTL _MMIO(0x9028)
@@ -314,13 +331,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GRDOM_VECS (1 << 13)
#define GEN11_GRDOM_VECS2 (1 << 14)
-#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base+0x220)
+#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
+#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
+#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220)
#define PP_DIR_DCLV_2G 0xffffffff
-#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
@@ -358,25 +375,25 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
#define GAM_ECOCHK _MMIO(0x4090)
-#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
-#define ECOCHK_SNB_BIT (1<<10)
-#define ECOCHK_DIS_TLB (1<<8)
-#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
-#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
-#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
-#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4)
-#define ECOCHK_PPGTT_LLC_IVB (0x1<<3)
-#define ECOCHK_PPGTT_UC_HSW (0x1<<3)
-#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
-#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+#define BDW_DISABLE_HDC_INVALIDATION (1 << 25)
+#define ECOCHK_SNB_BIT (1 << 10)
+#define ECOCHK_DIS_TLB (1 << 8)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6)
+#define ECOCHK_PPGTT_CACHE64B (0x3 << 3)
+#define ECOCHK_PPGTT_CACHE4B (0x0 << 3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3)
+#define ECOCHK_PPGTT_UC_HSW (0x1 << 3)
+#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
+#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
#define GAC_ECO_BITS _MMIO(0x14090)
-#define ECOBITS_SNB_BIT (1<<13)
-#define ECOBITS_PPGTT_CACHE64B (3<<8)
-#define ECOBITS_PPGTT_CACHE4B (0<<8)
+#define ECOBITS_SNB_BIT (1 << 13)
+#define ECOBITS_PPGTT_CACHE64B (3 << 8)
+#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GAB_CTL _MMIO(0x24000)
-#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
@@ -395,6 +412,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_STOLEN_RESERVED_4M (2 << 7)
#define GEN8_STOLEN_RESERVED_8M (3 << 7)
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
+#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
/* VGA stuff */
@@ -404,15 +422,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _VGA_MSR_WRITE _MMIO(0x3c2)
#define VGA_MSR_WRITE 0x3c2
#define VGA_MSR_READ 0x3cc
-#define VGA_MSR_MEM_EN (1<<1)
-#define VGA_MSR_CGA_MODE (1<<0)
+#define VGA_MSR_MEM_EN (1 << 1)
+#define VGA_MSR_CGA_MODE (1 << 0)
#define VGA_SR_INDEX 0x3c4
#define SR01 1
#define VGA_SR_DATA 0x3c5
#define VGA_AR_INDEX 0x3c0
-#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_VID_EN (1 << 5)
#define VGA_AR_DATA_WRITE 0x3c0
#define VGA_AR_DATA_READ 0x3c1
@@ -445,8 +463,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
-#define LOWER_SLICE_ENABLED (1<<0)
-#define LOWER_SLICE_DISABLED (0<<0)
+#define LOWER_SLICE_ENABLED (1 << 0)
+#define LOWER_SLICE_DISABLED (0 << 0)
/*
* Registers used only by the command parser
@@ -504,47 +522,47 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000
#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F
#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6
-#define GEN7_OACONTROL_TIMER_ENABLE (1<<5)
-#define GEN7_OACONTROL_FORMAT_A13 (0<<2)
-#define GEN7_OACONTROL_FORMAT_A29 (1<<2)
-#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2)
-#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2)
-#define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2)
-#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2)
-#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2)
-#define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2)
+#define GEN7_OACONTROL_TIMER_ENABLE (1 << 5)
+#define GEN7_OACONTROL_FORMAT_A13 (0 << 2)
+#define GEN7_OACONTROL_FORMAT_A29 (1 << 2)
+#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2 << 2)
+#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3 << 2)
+#define GEN7_OACONTROL_FORMAT_B4_C8 (4 << 2)
+#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5 << 2)
+#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6 << 2)
+#define GEN7_OACONTROL_FORMAT_C4_B8 (7 << 2)
#define GEN7_OACONTROL_FORMAT_SHIFT 2
-#define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1)
-#define GEN7_OACONTROL_ENABLE (1<<0)
+#define GEN7_OACONTROL_PER_CTX_ENABLE (1 << 1)
+#define GEN7_OACONTROL_ENABLE (1 << 0)
#define GEN8_OACTXID _MMIO(0x2364)
#define GEN8_OA_DEBUG _MMIO(0x2B04)
-#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1<<5)
-#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1<<6)
-#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1<<2)
-#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1<<1)
+#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5)
+#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6)
+#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2)
+#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1)
#define GEN8_OACONTROL _MMIO(0x2B00)
-#define GEN8_OA_REPORT_FORMAT_A12 (0<<2)
-#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2)
-#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2)
-#define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2)
+#define GEN8_OA_REPORT_FORMAT_A12 (0 << 2)
+#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2 << 2)
+#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5 << 2)
+#define GEN8_OA_REPORT_FORMAT_C4_B8 (7 << 2)
#define GEN8_OA_REPORT_FORMAT_SHIFT 2
-#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1)
-#define GEN8_OA_COUNTER_ENABLE (1<<0)
+#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1 << 1)
+#define GEN8_OA_COUNTER_ENABLE (1 << 0)
#define GEN8_OACTXCONTROL _MMIO(0x2360)
#define GEN8_OA_TIMER_PERIOD_MASK 0x3F
#define GEN8_OA_TIMER_PERIOD_SHIFT 2
-#define GEN8_OA_TIMER_ENABLE (1<<1)
-#define GEN8_OA_COUNTER_RESUME (1<<0)
+#define GEN8_OA_TIMER_ENABLE (1 << 1)
+#define GEN8_OA_COUNTER_RESUME (1 << 0)
#define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */
-#define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3)
-#define GEN7_OABUFFER_EDGE_TRIGGER (1<<2)
-#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1)
-#define GEN7_OABUFFER_RESUME (1<<0)
+#define GEN7_OABUFFER_OVERRUN_DISABLE (1 << 3)
+#define GEN7_OABUFFER_EDGE_TRIGGER (1 << 2)
+#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1 << 1)
+#define GEN7_OABUFFER_RESUME (1 << 0)
#define GEN8_OABUFFER_UDW _MMIO(0x23b4)
#define GEN8_OABUFFER _MMIO(0x2b14)
@@ -552,33 +570,33 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_OASTATUS1 _MMIO(0x2364)
#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0
-#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2)
-#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1)
-#define GEN7_OASTATUS1_REPORT_LOST (1<<0)
+#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1 << 2)
+#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1 << 1)
+#define GEN7_OASTATUS1_REPORT_LOST (1 << 0)
#define GEN7_OASTATUS2 _MMIO(0x2368)
#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0
#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
#define GEN8_OASTATUS _MMIO(0x2b08)
-#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3)
-#define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2)
-#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1)
-#define GEN8_OASTATUS_REPORT_LOST (1<<0)
+#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3)
+#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2)
+#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1)
+#define GEN8_OASTATUS_REPORT_LOST (1 << 0)
#define GEN8_OAHEADPTR _MMIO(0x2B0C)
#define GEN8_OAHEADPTR_MASK 0xffffffc0
#define GEN8_OATAILPTR _MMIO(0x2B10)
#define GEN8_OATAILPTR_MASK 0xffffffc0
-#define OABUFFER_SIZE_128K (0<<3)
-#define OABUFFER_SIZE_256K (1<<3)
-#define OABUFFER_SIZE_512K (2<<3)
-#define OABUFFER_SIZE_1M (3<<3)
-#define OABUFFER_SIZE_2M (4<<3)
-#define OABUFFER_SIZE_4M (5<<3)
-#define OABUFFER_SIZE_8M (6<<3)
-#define OABUFFER_SIZE_16M (7<<3)
+#define OABUFFER_SIZE_128K (0 << 3)
+#define OABUFFER_SIZE_256K (1 << 3)
+#define OABUFFER_SIZE_512K (2 << 3)
+#define OABUFFER_SIZE_1M (3 << 3)
+#define OABUFFER_SIZE_2M (4 << 3)
+#define OABUFFER_SIZE_4M (5 << 3)
+#define OABUFFER_SIZE_8M (6 << 3)
+#define OABUFFER_SIZE_16M (7 << 3)
/*
* Flexible, Aggregate EU Counter Registers.
@@ -601,35 +619,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OASTARTTRIG1_THRESHOLD_MASK 0xffff
#define OASTARTTRIG2 _MMIO(0x2714)
-#define OASTARTTRIG2_INVERT_A_0 (1<<0)
-#define OASTARTTRIG2_INVERT_A_1 (1<<1)
-#define OASTARTTRIG2_INVERT_A_2 (1<<2)
-#define OASTARTTRIG2_INVERT_A_3 (1<<3)
-#define OASTARTTRIG2_INVERT_A_4 (1<<4)
-#define OASTARTTRIG2_INVERT_A_5 (1<<5)
-#define OASTARTTRIG2_INVERT_A_6 (1<<6)
-#define OASTARTTRIG2_INVERT_A_7 (1<<7)
-#define OASTARTTRIG2_INVERT_A_8 (1<<8)
-#define OASTARTTRIG2_INVERT_A_9 (1<<9)
-#define OASTARTTRIG2_INVERT_A_10 (1<<10)
-#define OASTARTTRIG2_INVERT_A_11 (1<<11)
-#define OASTARTTRIG2_INVERT_A_12 (1<<12)
-#define OASTARTTRIG2_INVERT_A_13 (1<<13)
-#define OASTARTTRIG2_INVERT_A_14 (1<<14)
-#define OASTARTTRIG2_INVERT_A_15 (1<<15)
-#define OASTARTTRIG2_INVERT_B_0 (1<<16)
-#define OASTARTTRIG2_INVERT_B_1 (1<<17)
-#define OASTARTTRIG2_INVERT_B_2 (1<<18)
-#define OASTARTTRIG2_INVERT_B_3 (1<<19)
-#define OASTARTTRIG2_INVERT_C_0 (1<<20)
-#define OASTARTTRIG2_INVERT_C_1 (1<<21)
-#define OASTARTTRIG2_INVERT_D_0 (1<<22)
-#define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23)
-#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24)
-#define OASTARTTRIG2_EVENT_SELECT_0 (1<<28)
-#define OASTARTTRIG2_EVENT_SELECT_1 (1<<29)
-#define OASTARTTRIG2_EVENT_SELECT_2 (1<<30)
-#define OASTARTTRIG2_EVENT_SELECT_3 (1<<31)
+#define OASTARTTRIG2_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG2_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG2_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG2_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG2_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG2_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG2_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG2_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG2_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG2_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG2_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG2_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG2_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG2_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG2_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG2_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG2_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG2_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG2_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG2_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG2_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG2_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG2_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG2_THRESHOLD_ENABLE (1 << 23)
+#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1 << 24)
+#define OASTARTTRIG2_EVENT_SELECT_0 (1 << 28)
+#define OASTARTTRIG2_EVENT_SELECT_1 (1 << 29)
+#define OASTARTTRIG2_EVENT_SELECT_2 (1 << 30)
+#define OASTARTTRIG2_EVENT_SELECT_3 (1 << 31)
#define OASTARTTRIG3 _MMIO(0x2718)
#define OASTARTTRIG3_NOA_SELECT_MASK 0xf
@@ -658,35 +676,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OASTARTTRIG5_THRESHOLD_MASK 0xffff
#define OASTARTTRIG6 _MMIO(0x2724)
-#define OASTARTTRIG6_INVERT_A_0 (1<<0)
-#define OASTARTTRIG6_INVERT_A_1 (1<<1)
-#define OASTARTTRIG6_INVERT_A_2 (1<<2)
-#define OASTARTTRIG6_INVERT_A_3 (1<<3)
-#define OASTARTTRIG6_INVERT_A_4 (1<<4)
-#define OASTARTTRIG6_INVERT_A_5 (1<<5)
-#define OASTARTTRIG6_INVERT_A_6 (1<<6)
-#define OASTARTTRIG6_INVERT_A_7 (1<<7)
-#define OASTARTTRIG6_INVERT_A_8 (1<<8)
-#define OASTARTTRIG6_INVERT_A_9 (1<<9)
-#define OASTARTTRIG6_INVERT_A_10 (1<<10)
-#define OASTARTTRIG6_INVERT_A_11 (1<<11)
-#define OASTARTTRIG6_INVERT_A_12 (1<<12)
-#define OASTARTTRIG6_INVERT_A_13 (1<<13)
-#define OASTARTTRIG6_INVERT_A_14 (1<<14)
-#define OASTARTTRIG6_INVERT_A_15 (1<<15)
-#define OASTARTTRIG6_INVERT_B_0 (1<<16)
-#define OASTARTTRIG6_INVERT_B_1 (1<<17)
-#define OASTARTTRIG6_INVERT_B_2 (1<<18)
-#define OASTARTTRIG6_INVERT_B_3 (1<<19)
-#define OASTARTTRIG6_INVERT_C_0 (1<<20)
-#define OASTARTTRIG6_INVERT_C_1 (1<<21)
-#define OASTARTTRIG6_INVERT_D_0 (1<<22)
-#define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23)
-#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24)
-#define OASTARTTRIG6_EVENT_SELECT_4 (1<<28)
-#define OASTARTTRIG6_EVENT_SELECT_5 (1<<29)
-#define OASTARTTRIG6_EVENT_SELECT_6 (1<<30)
-#define OASTARTTRIG6_EVENT_SELECT_7 (1<<31)
+#define OASTARTTRIG6_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG6_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG6_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG6_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG6_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG6_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG6_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG6_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG6_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG6_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG6_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG6_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG6_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG6_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG6_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG6_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG6_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG6_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG6_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG6_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG6_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG6_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG6_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG6_THRESHOLD_ENABLE (1 << 23)
+#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1 << 24)
+#define OASTARTTRIG6_EVENT_SELECT_4 (1 << 28)
+#define OASTARTTRIG6_EVENT_SELECT_5 (1 << 29)
+#define OASTARTTRIG6_EVENT_SELECT_6 (1 << 30)
+#define OASTARTTRIG6_EVENT_SELECT_7 (1 << 31)
#define OASTARTTRIG7 _MMIO(0x2728)
#define OASTARTTRIG7_NOA_SELECT_MASK 0xf
@@ -715,31 +733,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG2 _MMIO(0x2744)
-#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG2_INVERT_A_0 (1 << 0)
+#define OAREPORTTRIG2_INVERT_A_1 (1 << 1)
+#define OAREPORTTRIG2_INVERT_A_2 (1 << 2)
+#define OAREPORTTRIG2_INVERT_A_3 (1 << 3)
+#define OAREPORTTRIG2_INVERT_A_4 (1 << 4)
+#define OAREPORTTRIG2_INVERT_A_5 (1 << 5)
+#define OAREPORTTRIG2_INVERT_A_6 (1 << 6)
+#define OAREPORTTRIG2_INVERT_A_7 (1 << 7)
+#define OAREPORTTRIG2_INVERT_A_8 (1 << 8)
+#define OAREPORTTRIG2_INVERT_A_9 (1 << 9)
+#define OAREPORTTRIG2_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG2_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG2_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG2_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG2_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG2_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG2_INVERT_B_0 (1 << 16)
+#define OAREPORTTRIG2_INVERT_B_1 (1 << 17)
+#define OAREPORTTRIG2_INVERT_B_2 (1 << 18)
+#define OAREPORTTRIG2_INVERT_B_3 (1 << 19)
+#define OAREPORTTRIG2_INVERT_C_0 (1 << 20)
+#define OAREPORTTRIG2_INVERT_C_1 (1 << 21)
+#define OAREPORTTRIG2_INVERT_D_0 (1 << 22)
+#define OAREPORTTRIG2_THRESHOLD_ENABLE (1 << 23)
+#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1 << 31)
#define OAREPORTTRIG3 _MMIO(0x2748)
#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
@@ -768,31 +786,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG6 _MMIO(0x2754)
-#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG6_INVERT_A_0 (1 << 0)
+#define OAREPORTTRIG6_INVERT_A_1 (1 << 1)
+#define OAREPORTTRIG6_INVERT_A_2 (1 << 2)
+#define OAREPORTTRIG6_INVERT_A_3 (1 << 3)
+#define OAREPORTTRIG6_INVERT_A_4 (1 << 4)
+#define OAREPORTTRIG6_INVERT_A_5 (1 << 5)
+#define OAREPORTTRIG6_INVERT_A_6 (1 << 6)
+#define OAREPORTTRIG6_INVERT_A_7 (1 << 7)
+#define OAREPORTTRIG6_INVERT_A_8 (1 << 8)
+#define OAREPORTTRIG6_INVERT_A_9 (1 << 9)
+#define OAREPORTTRIG6_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG6_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG6_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG6_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG6_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG6_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG6_INVERT_B_0 (1 << 16)
+#define OAREPORTTRIG6_INVERT_B_1 (1 << 17)
+#define OAREPORTTRIG6_INVERT_B_2 (1 << 18)
+#define OAREPORTTRIG6_INVERT_B_3 (1 << 19)
+#define OAREPORTTRIG6_INVERT_C_0 (1 << 20)
+#define OAREPORTTRIG6_INVERT_C_1 (1 << 21)
+#define OAREPORTTRIG6_INVERT_D_0 (1 << 22)
+#define OAREPORTTRIG6_THRESHOLD_ENABLE (1 << 23)
+#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1 << 31)
#define OAREPORTTRIG7 _MMIO(0x2758)
#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
@@ -828,9 +846,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC_COMPARE_VALUE_MASK 0xffff
#define OACEC_COMPARE_VALUE_SHIFT 3
-#define OACEC_SELECT_NOA (0<<19)
-#define OACEC_SELECT_PREV (1<<19)
-#define OACEC_SELECT_BOOLEAN (2<<19)
+#define OACEC_SELECT_NOA (0 << 19)
+#define OACEC_SELECT_PREV (1 << 19)
+#define OACEC_SELECT_BOOLEAN (2 << 19)
/* CECX_1 */
#define OACEC_MASK_MASK 0xffff
@@ -948,9 +966,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Reset registers
*/
#define DEBUG_RESET_I830 _MMIO(0x6070)
-#define DEBUG_RESET_FULL (1<<7)
-#define DEBUG_RESET_RENDER (1<<8)
-#define DEBUG_RESET_DISPLAY (1<<9)
+#define DEBUG_RESET_FULL (1 << 7)
+#define DEBUG_RESET_RENDER (1 << 8)
+#define DEBUG_RESET_DISPLAY (1 << 9)
/*
* IOSF sideband
@@ -961,7 +979,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_PORT_SHIFT 8
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
-#define IOSF_SB_BUSY (1<<0)
+#define IOSF_SB_BUSY (1 << 0)
#define IOSF_PORT_BUNIT 0x03
#define IOSF_PORT_PUNIT 0x04
#define IOSF_PORT_NC 0x11
@@ -1044,13 +1062,13 @@ enum i915_power_well_id {
/*
* HSW/BDW
- * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
*/
HSW_DISP_PW_GLOBAL = 15,
/*
* GEN9+
- * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
*/
SKL_DISP_PW_MISC_IO = 0,
SKL_DISP_PW_DDI_A_E,
@@ -1074,17 +1092,54 @@ enum i915_power_well_id {
SKL_DISP_PW_2,
/* - custom power wells */
- SKL_DISP_PW_DC_OFF,
BXT_DPIO_CMN_A,
BXT_DPIO_CMN_BC,
- GLK_DPIO_CMN_C, /* 19 */
+ GLK_DPIO_CMN_C, /* 18 */
+
+ /*
+ * GEN11+
+ * - _HSW_PWR_WELL_CTL1-4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_1 = 0,
+ ICL_DISP_PW_2,
+ ICL_DISP_PW_3,
+ ICL_DISP_PW_4,
+
+ /*
+ * - _HSW_PWR_WELL_CTL_AUX1/2/4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_AUX_A = 16,
+ ICL_DISP_PW_AUX_B,
+ ICL_DISP_PW_AUX_C,
+ ICL_DISP_PW_AUX_D,
+ ICL_DISP_PW_AUX_E,
+ ICL_DISP_PW_AUX_F,
+
+ ICL_DISP_PW_AUX_TBT1 = 24,
+ ICL_DISP_PW_AUX_TBT2,
+ ICL_DISP_PW_AUX_TBT3,
+ ICL_DISP_PW_AUX_TBT4,
+
+ /*
+ * - _HSW_PWR_WELL_CTL_DDI1/2/4
+ * (status bit: (id&15)*2, req bit:(id&15)*2+1)
+ */
+ ICL_DISP_PW_DDI_A = 32,
+ ICL_DISP_PW_DDI_B,
+ ICL_DISP_PW_DDI_C,
+ ICL_DISP_PW_DDI_D,
+ ICL_DISP_PW_DDI_E,
+ ICL_DISP_PW_DDI_F, /* 37 */
/*
* Multiple platforms.
* Must start following the highest ID of any platform.
* - custom power wells
*/
- I915_DISP_PW_ALWAYS_ON = 20,
+ SKL_DISP_PW_DC_OFF = 38,
+ I915_DISP_PW_ALWAYS_ON,
};
#define PUNIT_REG_PWRGT_CTRL 0x60
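
A worked example of the (id & 15) * 2 status/request bit formula from the comments in the enum above, using one of the gen11 IDs just defined (plain C, illustrative only):

	#include <stdio.h>

	int main(void)
	{
		int id = 17;				/* ICL_DISP_PW_AUX_B */
		int status_bit  = (id & 15) * 2;	/* 2 */
		int request_bit = status_bit + 1;	/* 3 */

		printf("AUX_B: status bit %d, request bit %d\n",
		       status_bit, request_bit);
		return 0;
	}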
@@ -1098,8 +1153,8 @@ enum i915_power_well_id {
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
-#define GPLLENABLE (1<<4)
-#define GENFREQSTATUS (1<<0)
+#define GPLLENABLE (1 << 4)
+#define GENFREQSTATUS (1 << 0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
@@ -1141,11 +1196,11 @@ enum i915_power_well_id {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
-#define VLV_TURBO_SOC_OVERRIDE 0x04
-#define VLV_OVERRIDE_EN 1
-#define VLV_SOC_TDP_EN (1 << 1)
-#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
-#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
+#define VLV_TURBO_SOC_OVERRIDE 0x04
+#define VLV_OVERRIDE_EN 1
+#define VLV_SOC_TDP_EN (1 << 1)
+#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
+#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
@@ -1194,10 +1249,10 @@ enum i915_power_well_id {
#define DPIO_DEVFN 0
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
-#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
-#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
-#define DPIO_SFR_BYPASS (1<<1)
-#define DPIO_CMNRST (1<<0)
+#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1 << 1)
+#define DPIO_CMNRST (1 << 0)
#define DPIO_PHY(pipe) ((pipe) >> 1)
#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy])
@@ -1215,7 +1270,7 @@ enum i915_power_well_id {
#define DPIO_P1_SHIFT (21) /* 3 bits */
#define DPIO_P2_SHIFT (16) /* 5 bits */
#define DPIO_N_SHIFT (12) /* 4 bits */
-#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_ENABLE_CALIBRATION (1 << 11)
#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
#define DPIO_M2DIV_MASK 0xff
#define _VLV_PLL_DW3_CH1 0x802c
@@ -1264,10 +1319,10 @@ enum i915_power_well_id {
#define _VLV_PCS_DW0_CH0 0x8200
#define _VLV_PCS_DW0_CH1 0x8400
-#define DPIO_PCS_TX_LANE2_RESET (1<<16)
-#define DPIO_PCS_TX_LANE1_RESET (1<<7)
-#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
+#define DPIO_PCS_TX_LANE2_RESET (1 << 16)
+#define DPIO_PCS_TX_LANE1_RESET (1 << 7)
+#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1 << 3)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
#define _VLV_PCS01_DW0_CH0 0x200
@@ -1279,11 +1334,11 @@ enum i915_power_well_id {
#define _VLV_PCS_DW1_CH0 0x8204
#define _VLV_PCS_DW1_CH1 0x8404
-#define CHV_PCS_REQ_SOFTRESET_EN (1<<23)
-#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
-#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define CHV_PCS_REQ_SOFTRESET_EN (1 << 23)
+#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1 << 22)
+#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
-#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
+#define DPIO_PCS_CLK_SOFT_RESET (1 << 5)
#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
#define _VLV_PCS01_DW1_CH0 0x204
@@ -1308,12 +1363,12 @@ enum i915_power_well_id {
#define _VLV_PCS_DW9_CH0 0x8224
#define _VLV_PCS_DW9_CH1 0x8424
-#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
-#define DPIO_PCS_TX2MARGIN_000 (0<<13)
-#define DPIO_PCS_TX2MARGIN_101 (1<<13)
-#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
-#define DPIO_PCS_TX1MARGIN_000 (0<<10)
-#define DPIO_PCS_TX1MARGIN_101 (1<<10)
+#define DPIO_PCS_TX2MARGIN_MASK (0x7 << 13)
+#define DPIO_PCS_TX2MARGIN_000 (0 << 13)
+#define DPIO_PCS_TX2MARGIN_101 (1 << 13)
+#define DPIO_PCS_TX1MARGIN_MASK (0x7 << 10)
+#define DPIO_PCS_TX1MARGIN_000 (0 << 10)
+#define DPIO_PCS_TX1MARGIN_101 (1 << 10)
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
#define _VLV_PCS01_DW9_CH0 0x224
@@ -1325,14 +1380,14 @@ enum i915_power_well_id {
#define _CHV_PCS_DW10_CH0 0x8228
#define _CHV_PCS_DW10_CH1 0x8428
-#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
-#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
-#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
-#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
-#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
-#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
-#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
-#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
+#define DPIO_PCS_SWING_CALC_TX0_TX2 (1 << 30)
+#define DPIO_PCS_SWING_CALC_TX1_TX3 (1 << 31)
+#define DPIO_PCS_TX2DEEMP_MASK (0xf << 24)
+#define DPIO_PCS_TX2DEEMP_9P5 (0 << 24)
+#define DPIO_PCS_TX2DEEMP_6P0 (2 << 24)
+#define DPIO_PCS_TX1DEEMP_MASK (0xf << 16)
+#define DPIO_PCS_TX1DEEMP_9P5 (0 << 16)
+#define DPIO_PCS_TX1DEEMP_6P0 (2 << 16)
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
#define _VLV_PCS01_DW10_CH0 0x0228
@@ -1344,10 +1399,10 @@ enum i915_power_well_id {
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
-#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24)
-#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
-#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
-#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
+#define DPIO_TX2_STAGGER_MASK(x) ((x) << 24)
+#define DPIO_LANEDESKEW_STRAP_OVRD (1 << 3)
+#define DPIO_LEFT_TXFIFO_RST_MASTER (1 << 1)
+#define DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0)
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
#define _VLV_PCS01_DW11_CH0 0x022c
@@ -1366,11 +1421,11 @@ enum i915_power_well_id {
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
-#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20)
-#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16)
-#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8)
-#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6)
-#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0)
+#define DPIO_TX2_STAGGER_MULT(x) ((x) << 20)
+#define DPIO_TX1_STAGGER_MULT(x) ((x) << 16)
+#define DPIO_TX1_STAGGER_MASK(x) ((x) << 8)
+#define DPIO_LANESTAGGER_STRAP_OVRD (1 << 6)
+#define DPIO_LANESTAGGER_STRAP(x) ((x) << 0)
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
#define _VLV_PCS_DW14_CH0 0x8238
@@ -1391,7 +1446,7 @@ enum i915_power_well_id {
#define _VLV_TX_DW3_CH0 0x828c
#define _VLV_TX_DW3_CH1 0x848c
/* The following bit for CHV phy */
-#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
+#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1 << 27)
#define DPIO_SWING_MARGIN101_SHIFT 16
#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
@@ -1410,7 +1465,7 @@ enum i915_power_well_id {
#define _VLV_TX_DW5_CH0 0x8294
#define _VLV_TX_DW5_CH1 0x8494
-#define DPIO_TX_OCALINIT_EN (1<<31)
+#define DPIO_TX_OCALINIT_EN (1 << 31)
#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
#define _VLV_TX_DW11_CH0 0x82ac
@@ -1640,10 +1695,10 @@ enum i915_power_well_id {
#define PORT_PLL_LOCK_THRESHOLD_SHIFT 1
#define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
/* PORT_PLL_10_A */
-#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
+#define PORT_PLL_DCO_AMP_OVR_EN_H (1 << 27)
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
-#define PORT_PLL_DCO_AMP(x) ((x)<<10)
+#define PORT_PLL_DCO_AMP(x) ((x) << 10)
#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
_PORT_PLL_0_B, \
_PORT_PLL_0_C)
@@ -1666,6 +1721,26 @@ enum i915_power_well_id {
#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
_ICL_PORT_CL_DW5_B)
+#define _CNL_PORT_CL_DW10_A 0x162028
+#define _ICL_PORT_CL_DW10_B 0x6c028
+#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \
+ _CNL_PORT_CL_DW10_A, \
+ _ICL_PORT_CL_DW10_B)
+#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
+#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
+#define PWR_UP_ALL_LANES (0x0 << 4)
+#define PWR_DOWN_LN_3_2_1 (0xe << 4)
+#define PWR_DOWN_LN_3_2 (0xc << 4)
+#define PWR_DOWN_LN_3 (0x8 << 4)
+#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
+#define PWR_DOWN_LN_1_0 (0x3 << 4)
+#define PWR_DOWN_LN_1 (0x2 << 4)
+#define PWR_DOWN_LN_3_1 (0xa << 4)
+#define PWR_DOWN_LN_3_1_0 (0xb << 4)
+#define PWR_DOWN_LN_MASK (0xf << 4)
+#define PWR_DOWN_LN_SHIFT 4
+
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@@ -1678,6 +1753,13 @@ enum i915_power_well_id {
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
+#define _ICL_PORT_CL_DW12_A 0x162030
+#define _ICL_PORT_CL_DW12_B 0x6C030
+#define ICL_LANE_ENABLE_AUX (1 << 0)
+#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \
+ _ICL_PORT_CL_DW12_A, \
+ _ICL_PORT_CL_DW12_B)
+
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
@@ -1715,16 +1797,22 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_LN0_D, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_F))
+
#define _ICL_PORT_PCS_DW1_GRP_A 0x162604
#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604
#define _ICL_PORT_PCS_DW1_LN0_A 0x162804
#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804
+#define _ICL_PORT_PCS_DW1_AUX_A 0x162304
+#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304
#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\
_ICL_PORT_PCS_DW1_GRP_A, \
_ICL_PORT_PCS_DW1_GRP_B)
#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_PCS_DW1_LN0_A, \
_ICL_PORT_PCS_DW1_LN0_B)
+#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_PCS_DW1_AUX_A, \
+ _ICL_PORT_PCS_DW1_AUX_B)
#define COMMON_KEEPER_EN (1 << 26)
/* CNL Port TX registers */
@@ -1745,7 +1833,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_D_GRP_OFFSET, \
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_F_GRP_OFFSET) + \
- 4*(dw))
+ 4 * (dw))
#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1753,7 +1841,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_D_LN0_OFFSET, \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_F_LN0_OFFSET) + \
- 4*(dw))
+ 4 * (dw))
#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
@@ -1761,16 +1849,23 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_DW2_GRP_B 0x6C688
#define _ICL_PORT_TX_DW2_LN0_A 0x162888
#define _ICL_PORT_TX_DW2_LN0_B 0x6C888
+#define _ICL_PORT_TX_DW2_AUX_A 0x162388
+#define _ICL_PORT_TX_DW2_AUX_B 0x6c388
#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW2_GRP_A, \
_ICL_PORT_TX_DW2_GRP_B)
#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW2_LN0_A, \
_ICL_PORT_TX_DW2_LN0_B)
+#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW2_AUX_A, \
+ _ICL_PORT_TX_DW2_AUX_B)
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
#define SWING_SEL_LOWER_MASK (0x7 << 11)
+#define FRC_LATENCY_OPTIM_MASK (0x7 << 8)
+#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8)
#define RCOMP_SCALAR(x) ((x) << 0)
#define RCOMP_SCALAR_MASK (0xFF << 0)
@@ -1779,21 +1874,26 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
- (ln * (_CNL_PORT_TX_DW4_LN1_AE - \
+ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define _ICL_PORT_TX_DW4_GRP_A 0x162690
#define _ICL_PORT_TX_DW4_GRP_B 0x6C690
#define _ICL_PORT_TX_DW4_LN0_A 0x162890
#define _ICL_PORT_TX_DW4_LN1_A 0x162990
#define _ICL_PORT_TX_DW4_LN0_B 0x6C890
+#define _ICL_PORT_TX_DW4_AUX_A 0x162390
+#define _ICL_PORT_TX_DW4_AUX_B 0x6c390
#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW4_GRP_A, \
_ICL_PORT_TX_DW4_GRP_B)
#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \
_ICL_PORT_TX_DW4_LN0_A, \
_ICL_PORT_TX_DW4_LN0_B) + \
- (ln * (_ICL_PORT_TX_DW4_LN1_A - \
- _ICL_PORT_TX_DW4_LN0_A)))
+ ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
+ _ICL_PORT_TX_DW4_LN0_A)))
+#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW4_AUX_A, \
+ _ICL_PORT_TX_DW4_AUX_B)
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1808,12 +1908,17 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_DW5_GRP_B 0x6C694
#define _ICL_PORT_TX_DW5_LN0_A 0x162894
#define _ICL_PORT_TX_DW5_LN0_B 0x6C894
+#define _ICL_PORT_TX_DW5_AUX_A 0x162394
+#define _ICL_PORT_TX_DW5_AUX_B 0x6c394
#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW5_GRP_A, \
_ICL_PORT_TX_DW5_GRP_B)
#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \
_ICL_PORT_TX_DW5_LN0_A, \
_ICL_PORT_TX_DW5_LN0_B)
+#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \
+ _ICL_PORT_TX_DW5_AUX_A, \
+ _ICL_PORT_TX_DW5_AUX_B)
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -1990,6 +2095,11 @@ enum i915_power_well_id {
_ICL_PORT_COMP_DW10_A, \
_ICL_PORT_COMP_DW10_B)
+/* ICL PHY DFLEX registers */
+#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
+#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
@@ -2134,8 +2244,8 @@ enum i915_power_well_id {
/* SKL balance leg register */
#define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C)
/* I_boost values */
-#define BALANCE_LEG_SHIFT(port) (8+3*(port))
-#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
+#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port)))
/* Balance leg disable bits */
#define BALANCE_LEG_DISABLE_SHIFT 23
#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
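
A worked example of the per-port balance-leg field math above (plain C; port B == 1 is assumed for illustration):

	#include <stdio.h>

	#define BALANCE_LEG_SHIFT(port)   (8 + 3 * (port))
	#define BALANCE_LEG_MASK(port)    (7 << (8 + 3 * (port)))
	#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))

	int main(void)
	{
		int port = 1;	/* port B */

		printf("shift=%d mask=0x%x disable=0x%x\n",
		       BALANCE_LEG_SHIFT(port),		/* 11 */
		       BALANCE_LEG_MASK(port),		/* 0x3800 */
		       BALANCE_LEG_DISABLE(port));	/* 0x1000000 */
		return 0;
	}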
@@ -2155,10 +2265,10 @@ enum i915_power_well_id {
#define I830_FENCE_TILING_Y_SHIFT 12
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
-#define I830_FENCE_REG_VALID (1<<0)
+#define I830_FENCE_REG_VALID (1 << 0)
#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
-#define I830_FENCE_MAX_SIZE_VAL (1<<8)
+#define I830_FENCE_MAX_SIZE_VAL (1 << 8)
#define I915_FENCE_START_MASK 0x0ff00000
#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
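
The ffs()-based encoding stores the power-of-two size exponent (1 MiB units here, 512 KiB units in the I830 variant above) in the field at bit 8. A worked example (plain C; the fence sizes are chosen for illustration):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)

	int main(void)
	{
		/* 1 MiB -> exponent 0, 4 MiB -> exponent 2, encoded at bit 8. */
		printf("0x%x 0x%x\n",
		       I915_FENCE_SIZE_BITS(1 << 20),	/* 0x000 */
		       I915_FENCE_SIZE_BITS(4 << 20));	/* 0x200 */
		return 0;
	}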
@@ -2167,7 +2277,7 @@ enum i915_power_well_id {
#define FENCE_REG_965_HI(i) _MMIO(0x03000 + (i) * 8 + 4)
#define I965_FENCE_PITCH_SHIFT 2
#define I965_FENCE_TILING_Y_SHIFT 1
-#define I965_FENCE_REG_VALID (1<<0)
+#define I965_FENCE_REG_VALID (1 << 0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
#define FENCE_REG_GEN6_LO(i) _MMIO(0x100000 + (i) * 8)
@@ -2190,13 +2300,13 @@ enum i915_power_well_id {
#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER _MMIO(0x02024)
-#define PRB0_BASE (0x2030-0x30)
-#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
-#define PRB2_BASE (0x2050-0x30) /* gen3 */
-#define SRB0_BASE (0x2100-0x30) /* gen2 */
-#define SRB1_BASE (0x2110-0x30) /* gen2 */
-#define SRB2_BASE (0x2120-0x30) /* 830 */
-#define SRB3_BASE (0x2130-0x30) /* 830 */
+#define PRB0_BASE (0x2030 - 0x30)
+#define PRB1_BASE (0x2040 - 0x30) /* 830,gen3 */
+#define PRB2_BASE (0x2050 - 0x30) /* gen3 */
+#define SRB0_BASE (0x2100 - 0x30) /* gen2 */
+#define SRB1_BASE (0x2110 - 0x30) /* gen2 */
+#define SRB2_BASE (0x2120 - 0x30) /* 830 */
+#define SRB3_BASE (0x2130 - 0x30) /* 830 */
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
@@ -2209,14 +2319,14 @@ enum i915_power_well_id {
#define GEN11_VEBOX_RING_BASE 0x1c8000
#define GEN11_VEBOX2_RING_BASE 0x1d8000
#define BLT_RING_BASE 0x22000
-#define RING_TAIL(base) _MMIO((base)+0x30)
-#define RING_HEAD(base) _MMIO((base)+0x34)
-#define RING_START(base) _MMIO((base)+0x38)
-#define RING_CTL(base) _MMIO((base)+0x3c)
+#define RING_TAIL(base) _MMIO((base) + 0x30)
+#define RING_HEAD(base) _MMIO((base) + 0x34)
+#define RING_START(base) _MMIO((base) + 0x38)
+#define RING_CTL(base) _MMIO((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_SYNC_0(base) _MMIO((base)+0x40)
-#define RING_SYNC_1(base) _MMIO((base)+0x44)
-#define RING_SYNC_2(base) _MMIO((base)+0x48)
+#define RING_SYNC_0(base) _MMIO((base) + 0x40)
+#define RING_SYNC_1(base) _MMIO((base) + 0x44)
+#define RING_SYNC_2(base) _MMIO((base) + 0x48)
#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
@@ -2230,21 +2340,22 @@ enum i915_power_well_id {
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC INVALID_MMIO_REG
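The GEN6_*SYNC aliases are nothing more than the RING_SYNC_* macros applied to a fixed ring base, e.g. GEN6_RVSYNC = RING_SYNC_0(RENDER_RING_BASE) = 0x2000 + 0x40 = 0x2040. The same base-plus-offset arithmetic without the _MMIO wrapper (illustrative only, not part of the header):

#include <assert.h>

#define RENDER_RING_BASE_      0x02000
#define RING_SYNC_0_ADDR(base) ((base) + 0x40)

static_assert(RING_SYNC_0_ADDR(RENDER_RING_BASE_) == 0x2040, "GEN6_RVSYNC address");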
-#define RING_PSMI_CTL(base) _MMIO((base)+0x50)
-#define RING_MAX_IDLE(base) _MMIO((base)+0x54)
-#define RING_HWS_PGA(base) _MMIO((base)+0x80)
-#define RING_HWS_PGA_GEN6(base) _MMIO((base)+0x2080)
-#define RING_RESET_CTL(base) _MMIO((base)+0xd0)
+#define RING_PSMI_CTL(base) _MMIO((base) + 0x50)
+#define RING_MAX_IDLE(base) _MMIO((base) + 0x54)
+#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
+#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
+#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
#define RESET_CTL_REQUEST_RESET (1 << 0)
#define RESET_CTL_READY_TO_RESET (1 << 1)
+#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
#define GTT_CACHE_EN_ALL 0xF0007FFF
#define GEN7_WR_WATERMARK _MMIO(0x4028)
#define GEN7_GFX_PRIO_CTRL _MMIO(0x402C)
#define ARB_MODE _MMIO(0x4030)
-#define ARB_MODE_SWIZZLE_SNB (1<<4)
-#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define ARB_MODE_SWIZZLE_SNB (1 << 4)
+#define ARB_MODE_SWIZZLE_IVB (1 << 5)
#define GEN7_GFX_PEND_TLB0 _MMIO(0x4034)
#define GEN7_GFX_PEND_TLB1 _MMIO(0x4038)
/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
@@ -2254,30 +2365,30 @@ enum i915_power_well_id {
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
#define GAMTARBMODE _MMIO(0x04a08)
-#define ARB_MODE_BWGTLB_DISABLE (1<<9)
-#define ARB_MODE_SWIZZLE_BDW (1<<1)
+#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
+#define ARB_MODE_SWIZZLE_BDW (1 << 1)
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
-#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
+#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
#define GEN8_RING_FAULT_REG _MMIO(0x4094)
#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
-#define RING_FAULT_GTTSEL_MASK (1<<11)
+#define RING_FAULT_GTTSEL_MASK (1 << 11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
-#define RING_FAULT_VALID (1<<0)
+#define RING_FAULT_VALID (1 << 0)
#define DONE_REG _MMIO(0x40b0)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
-#define RING_ACTHD(base) _MMIO((base)+0x74)
-#define RING_ACTHD_UDW(base) _MMIO((base)+0x5c)
-#define RING_NOPID(base) _MMIO((base)+0x94)
-#define RING_IMR(base) _MMIO((base)+0xa8)
-#define RING_HWSTAM(base) _MMIO((base)+0x98)
-#define RING_TIMESTAMP(base) _MMIO((base)+0x358)
-#define RING_TIMESTAMP_UDW(base) _MMIO((base)+0x358 + 4)
+#define RING_ACTHD(base) _MMIO((base) + 0x74)
+#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c)
+#define RING_NOPID(base) _MMIO((base) + 0x94)
+#define RING_IMR(base) _MMIO((base) + 0xa8)
+#define RING_HWSTAM(base) _MMIO((base) + 0x98)
+#define RING_TIMESTAMP(base) _MMIO((base) + 0x358)
+#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -2290,24 +2401,25 @@ enum i915_power_well_id {
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
-#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
-#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
-#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
-#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
#define RING_MAX_NONPRIV_SLOTS 12
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
-#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
+#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18)
#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
-#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
-#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
+#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
#if 0
#define PRB0_TAIL _MMIO(0x2030)
@@ -2333,19 +2445,19 @@ enum i915_power_well_id {
#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
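GEN11 steers multicast register reads by packing a slice and a subslice select into one register, so picking a target is a masked insert over both fields. A hedged sketch in driver context (GEN8_MCR_SELECTOR is the driver's existing steering register, defined elsewhere in this file; slice and subslice are hypothetical locals):

u32 mcr = I915_READ(GEN8_MCR_SELECTOR);

mcr &= ~(GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK);
mcr |= GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
I915_WRITE(GEN8_MCR_SELECTOR, mcr);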
-#define RING_IPEIR(base) _MMIO((base)+0x64)
-#define RING_IPEHR(base) _MMIO((base)+0x68)
+#define RING_IPEIR(base) _MMIO((base) + 0x64)
+#define RING_IPEHR(base) _MMIO((base) + 0x68)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
* The GEN2 counterpart of this register is GEN2_INSTDONE.
*/
-#define RING_INSTDONE(base) _MMIO((base)+0x6c)
-#define RING_INSTPS(base) _MMIO((base)+0x70)
-#define RING_DMA_FADD(base) _MMIO((base)+0x78)
-#define RING_DMA_FADD_UDW(base) _MMIO((base)+0x60) /* gen8+ */
-#define RING_INSTPM(base) _MMIO((base)+0xc0)
-#define RING_MI_MODE(base) _MMIO((base)+0x9c)
+#define RING_INSTDONE(base) _MMIO((base) + 0x6c)
+#define RING_INSTPS(base) _MMIO((base) + 0x70)
+#define RING_DMA_FADD(base) _MMIO((base) + 0x78)
+#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */
+#define RING_INSTPM(base) _MMIO((base) + 0xc0)
+#define RING_MI_MODE(base) _MMIO((base) + 0x9c)
#define INSTPS _MMIO(0x2070) /* 965+ only */
#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
#define ACTHD_I965 _MMIO(0x2074)
@@ -2353,37 +2465,37 @@ enum i915_power_well_id {
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
-#define PWRCTX_EN (1<<0)
+#define PWRCTX_EN (1 << 0)
#define IPEIR _MMIO(0x2088)
#define IPEHR _MMIO(0x208c)
#define GEN2_INSTDONE _MMIO(0x2090)
#define NOPID _MMIO(0x2094)
#define HWSTAM _MMIO(0x2098)
#define DMA_FADD_I8XX _MMIO(0x20d0)
-#define RING_BBSTATE(base) _MMIO((base)+0x110)
+#define RING_BBSTATE(base) _MMIO((base) + 0x110)
#define RING_BB_PPGTT (1 << 5)
-#define RING_SBBADDR(base) _MMIO((base)+0x114) /* hsw+ */
-#define RING_SBBSTATE(base) _MMIO((base)+0x118) /* hsw+ */
-#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */
-#define RING_BBADDR(base) _MMIO((base)+0x140)
-#define RING_BBADDR_UDW(base) _MMIO((base)+0x168) /* gen8+ */
-#define RING_BB_PER_CTX_PTR(base) _MMIO((base)+0x1c0) /* gen8+ */
-#define RING_INDIRECT_CTX(base) _MMIO((base)+0x1c4) /* gen8+ */
-#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */
-#define RING_CTX_TIMESTAMP(base) _MMIO((base)+0x3a8) /* gen8+ */
+#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
+#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */
+#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
+#define RING_BBADDR(base) _MMIO((base) + 0x140)
+#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */
+#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
+#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
+#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
#define ERROR_GEN6 _MMIO(0x40a0)
#define GEN7_ERR_INT _MMIO(0x44040)
-#define ERR_INT_POISON (1<<31)
-#define ERR_INT_MMIO_UNCLAIMED (1<<13)
-#define ERR_INT_PIPE_CRC_DONE_C (1<<8)
-#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
-#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
-#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
-#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
-#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + (pipe)*3))
-#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
-#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
+#define ERR_INT_POISON (1 << 31)
+#define ERR_INT_MMIO_UNCLAIMED (1 << 13)
+#define ERR_INT_PIPE_CRC_DONE_C (1 << 8)
+#define ERR_INT_FIFO_UNDERRUN_C (1 << 6)
+#define ERR_INT_PIPE_CRC_DONE_B (1 << 5)
+#define ERR_INT_FIFO_UNDERRUN_B (1 << 3)
+#define ERR_INT_PIPE_CRC_DONE_A (1 << 2)
+#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) * 3))
+#define ERR_INT_FIFO_UNDERRUN_A (1 << 0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
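The parameterized forms reproduce the fixed per-pipe bits above: each pipe owns a 3-bit group, with the FIFO underrun at bit 3*pipe and the CRC-done at bit 3*pipe + 2. Compile-time spot checks (illustrative only, not part of the header):

#include <assert.h>

#define ERR_INT_PIPE_CRC_DONE_(pipe)  (1 << (2 + (pipe) * 3))
#define ERR_INT_FIFO_UNDERRUN_(pipe)  (1 << ((pipe) * 3))

static_assert(ERR_INT_FIFO_UNDERRUN_(1) == (1 << 3), "matches ERR_INT_FIFO_UNDERRUN_B");
static_assert(ERR_INT_PIPE_CRC_DONE_(2) == (1 << 8), "matches ERR_INT_PIPE_CRC_DONE_C");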
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
@@ -2391,7 +2503,7 @@ enum i915_power_well_id {
#define FAULT_GTT_SEL (1 << 4)
#define FPGA_DBG _MMIO(0x42300)
-#define FPGA_DBG_RM_NOCLAIM (1<<31)
+#define FPGA_DBG_RM_NOCLAIM (1 << 31)
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
#define CLAIM_ER_CLR (1 << 31)
@@ -2400,22 +2512,22 @@ enum i915_power_well_id {
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
-#define DERRMR_PIPEA_SCANLINE (1<<0)
-#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
-#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
-#define DERRMR_PIPEA_VBLANK (1<<3)
-#define DERRMR_PIPEA_HBLANK (1<<5)
-#define DERRMR_PIPEB_SCANLINE (1<<8)
-#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
-#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
-#define DERRMR_PIPEB_VBLANK (1<<11)
-#define DERRMR_PIPEB_HBLANK (1<<13)
+#define DERRMR_PIPEA_SCANLINE (1 << 0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2)
+#define DERRMR_PIPEA_VBLANK (1 << 3)
+#define DERRMR_PIPEA_HBLANK (1 << 5)
+#define DERRMR_PIPEB_SCANLINE (1 << 8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10)
+#define DERRMR_PIPEB_VBLANK (1 << 11)
+#define DERRMR_PIPEB_HBLANK (1 << 13)
/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
-#define DERRMR_PIPEC_SCANLINE (1<<14)
-#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
-#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
-#define DERRMR_PIPEC_VBLANK (1<<21)
-#define DERRMR_PIPEC_HBLANK (1<<22)
+#define DERRMR_PIPEC_SCANLINE (1 << 14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20)
+#define DERRMR_PIPEC_VBLANK (1 << 21)
+#define DERRMR_PIPEC_HBLANK (1 << 22)
/* GM45+ chicken bits -- debug workaround bits that may be required
@@ -2439,7 +2551,7 @@ enum i915_power_well_id {
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
-#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
#define MI_MODE _MMIO(0x209c)
@@ -2478,22 +2590,22 @@ enum i915_power_well_id {
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
-#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c)
-#define GFX_RUN_LIST_ENABLE (1<<15)
-#define GFX_INTERRUPT_STEERING (1<<14)
-#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
-#define GFX_SURFACE_FAULT_ENABLE (1<<12)
-#define GFX_REPLAY_MODE (1<<11)
-#define GFX_PSMI_GRANULARITY (1<<10)
-#define GFX_PPGTT_ENABLE (1<<9)
-#define GEN8_GFX_PPGTT_48B (1<<7)
-
-#define GFX_FORWARD_VBLANK_MASK (3<<5)
-#define GFX_FORWARD_VBLANK_NEVER (0<<5)
-#define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
-#define GFX_FORWARD_VBLANK_COND (2<<5)
-
-#define GEN11_GFX_DISABLE_LEGACY_MODE (1<<3)
+#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c)
+#define GFX_RUN_LIST_ENABLE (1 << 15)
+#define GFX_INTERRUPT_STEERING (1 << 14)
+#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13)
+#define GFX_SURFACE_FAULT_ENABLE (1 << 12)
+#define GFX_REPLAY_MODE (1 << 11)
+#define GFX_PSMI_GRANULARITY (1 << 10)
+#define GFX_PPGTT_ENABLE (1 << 9)
+#define GEN8_GFX_PPGTT_48B (1 << 7)
+
+#define GFX_FORWARD_VBLANK_MASK (3 << 5)
+#define GFX_FORWARD_VBLANK_NEVER (0 << 5)
+#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5)
+#define GFX_FORWARD_VBLANK_COND (2 << 5)
+
+#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3)
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
@@ -2507,8 +2619,8 @@ enum i915_power_well_id {
#define IMR _MMIO(0x20a8)
#define ISR _MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
-#define GINT_DIS (1<<22)
-#define GCFG_DIS (1<<8)
+#define GINT_DIS (1 << 22)
+#define GCFG_DIS (1 << 8)
#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064)
#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
@@ -2518,35 +2630,35 @@ enum i915_power_well_id {
#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
-#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
+#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
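Note the inverted mapping: plane A (index 0) sits at bit 11 and plane B at bit 10, matching the I915_DISPLAY_PLANE_{A,B}_FLIP_PENDING_INTERRUPT bits defined later in this file. Checked at compile time (illustrative only, not part of the header):

#include <assert.h>

#define DISPLAY_PLANE_FLIP_PENDING_(plane) (1 << (11 - (plane)))

static_assert(DISPLAY_PLANE_FLIP_PENDING_(0) == (1 << 11), "plane A");
static_assert(DISPLAY_PLANE_FLIP_PENDING_(1) == (1 << 10), "plane B");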
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
#define ESR _MMIO(0x20b8)
-#define GM45_ERROR_PAGE_TABLE (1<<5)
-#define GM45_ERROR_MEM_PRIV (1<<4)
-#define I915_ERROR_PAGE_TABLE (1<<4)
-#define GM45_ERROR_CP_PRIV (1<<3)
-#define I915_ERROR_MEMORY_REFRESH (1<<1)
-#define I915_ERROR_INSTRUCTION (1<<0)
+#define GM45_ERROR_PAGE_TABLE (1 << 5)
+#define GM45_ERROR_MEM_PRIV (1 << 4)
+#define I915_ERROR_PAGE_TABLE (1 << 4)
+#define GM45_ERROR_CP_PRIV (1 << 3)
+#define I915_ERROR_MEMORY_REFRESH (1 << 1)
+#define I915_ERROR_INSTRUCTION (1 << 0)
#define INSTPM _MMIO(0x20c0)
-#define INSTPM_SELF_EN (1<<12) /* 915GM only */
-#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
+#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
+#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
-#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
-#define INSTPM_TLB_INVALIDATE (1<<9)
-#define INSTPM_SYNC_FLUSH (1<<5)
+#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1 << 9)
+#define INSTPM_SYNC_FLUSH (1 << 5)
#define ACTHD _MMIO(0x20c8)
#define MEM_MODE _MMIO(0x20cc)
-#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
-#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
-#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
+#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
+#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
+#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* 85x only */
#define FW_BLC _MMIO(0x20d8)
#define FW_BLC2 _MMIO(0x20dc)
#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
-#define FW_BLC_SELF_EN_MASK (1<<31)
-#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
-#define FW_BLC_SELF_EN (1<<15) /* 945 only */
+#define FW_BLC_SELF_EN_MASK (1 << 31)
+#define FW_BLC_SELF_FIFO_MASK (1 << 16) /* 945 only */
+#define FW_BLC_SELF_EN (1 << 15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -2645,37 +2757,37 @@ enum i915_power_well_id {
#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
-#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
-#define CM0_IZ_OPT_DISABLE (1<<6)
-#define CM0_ZR_OPT_DISABLE (1<<5)
-#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
-#define CM0_DEPTH_EVICT_DISABLE (1<<4)
-#define CM0_COLOR_EVICT_DISABLE (1<<3)
-#define CM0_DEPTH_WRITE_DISABLE (1<<1)
-#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
+#define CM0_IZ_OPT_DISABLE (1 << 6)
+#define CM0_ZR_OPT_DISABLE (1 << 5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
+#define CM0_DEPTH_EVICT_DISABLE (1 << 4)
+#define CM0_COLOR_EVICT_DISABLE (1 << 3)
+#define CM0_DEPTH_WRITE_DISABLE (1 << 1)
+#define CM0_RC_OP_FLUSH_DISABLE (1 << 0)
#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
-#define GFX_FLSH_CNTL_EN (1<<0)
+#define GFX_FLSH_CNTL_EN (1 << 0)
#define ECOSKPD _MMIO(0x21d0)
-#define ECO_GATING_CX_ONLY (1<<3)
-#define ECO_FLIP_DONE (1<<0)
+#define ECO_GATING_CX_ONLY (1 << 3)
+#define ECO_FLIP_DONE (1 << 0)
#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
-#define RC_OP_FLUSH_ENABLE (1<<0)
-#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
+#define RC_OP_FLUSH_ENABLE (1 << 0)
+#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
-#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
-#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
-#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0)
#define GEN6_BLITTER_LOCK_SHIFT 16
-#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+#define GEN6_BLITTER_FBC_NOTIFY (1 << 3)
#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
-#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
+#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10)
#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
@@ -2714,6 +2826,10 @@ enum i915_power_well_id {
#define GEN10_F2_SS_DIS_SHIFT 18
#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT 4
+#define GEN10_L3BANK_MASK 0x0F
+
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN8_EU_DIS0_S0_MASK 0xffffff
#define GEN8_EU_DIS0_S1_SHIFT 24
@@ -2727,7 +2843,7 @@ enum i915_power_well_id {
#define GEN8_EU_DISABLE2 _MMIO(0x913c)
#define GEN8_EU_DIS2_S2_MASK 0xff
-#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)
+#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
#define GEN10_EU_DISABLE3 _MMIO(0x9140)
#define GEN10_EU_DIS_SS_MASK 0xff
@@ -2784,44 +2900,43 @@ enum i915_power_well_id {
(IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
-#define ILK_BSD_USER_INTERRUPT (1<<5)
-
-#define I915_PM_INTERRUPT (1<<31)
-#define I915_ISP_INTERRUPT (1<<22)
-#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
-#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
-#define I915_MIPIC_INTERRUPT (1<<19)
-#define I915_MIPIA_INTERRUPT (1<<18)
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
-#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
-#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16)
-#define I915_MASTER_ERROR_INTERRUPT (1<<15)
-#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
-#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13)
-#define I915_HWB_OOM_INTERRUPT (1<<13)
-#define I915_LPE_PIPE_C_INTERRUPT (1<<12)
-#define I915_SYNC_STATUS_INTERRUPT (1<<12)
-#define I915_MISC_INTERRUPT (1<<11)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
-#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
-#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
-#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
-#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3)
-#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2)
-#define I915_DEBUG_INTERRUPT (1<<2)
-#define I915_WINVALID_INTERRUPT (1<<1)
-#define I915_USER_INTERRUPT (1<<1)
-#define I915_ASLE_INTERRUPT (1<<0)
-#define I915_BSD_USER_INTERRUPT (1<<25)
+#define ILK_BSD_USER_INTERRUPT (1 << 5)
+
+#define I915_PM_INTERRUPT (1 << 31)
+#define I915_ISP_INTERRUPT (1 << 22)
+#define I915_LPE_PIPE_B_INTERRUPT (1 << 21)
+#define I915_LPE_PIPE_A_INTERRUPT (1 << 20)
+#define I915_MIPIC_INTERRUPT (1 << 19)
+#define I915_MIPIA_INTERRUPT (1 << 18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18)
+#define I915_DISPLAY_PORT_INTERRUPT (1 << 17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16)
+#define I915_MASTER_ERROR_INTERRUPT (1 << 15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13)
+#define I915_HWB_OOM_INTERRUPT (1 << 13)
+#define I915_LPE_PIPE_C_INTERRUPT (1 << 12)
+#define I915_SYNC_STATUS_INTERRUPT (1 << 12)
+#define I915_MISC_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2)
+#define I915_DEBUG_INTERRUPT (1 << 2)
+#define I915_WINVALID_INTERRUPT (1 << 1)
+#define I915_USER_INTERRUPT (1 << 1)
+#define I915_ASLE_INTERRUPT (1 << 0)
+#define I915_BSD_USER_INTERRUPT (1 << 25)
#define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000)
#define I915_HDMI_LPE_AUDIO_SIZE 0x1000
@@ -2844,19 +2959,19 @@ enum i915_power_well_id {
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
-#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
-#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
-#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
-#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_TS_SCHED_HS1 (0x5 << 16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3 << 16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1 << 16)
+#define GEN7_FF_TS_SCHED_HW (0x0 << 16) /* Default */
#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
-#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
-#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
-#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
-#define GEN7_FF_VS_SCHED_HW (0x0<<12)
-#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
-#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
-#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
-#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+#define GEN7_FF_VS_SCHED_HS1 (0x5 << 12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3 << 12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1 << 12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0 << 12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5 << 4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3 << 4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1 << 4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0 << 4)
/*
* Framebuffer compression (915+ only)
@@ -2865,51 +2980,51 @@ enum i915_power_well_id {
#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN (1<<31)
-#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_EN (1 << 31)
+#define FBC_CTL_PERIODIC (1 << 30)
#define FBC_CTL_INTERVAL_SHIFT (16)
-#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define FBC_CTL_C3_IDLE (1<<13)
+#define FBC_CTL_UNCOMPRESSIBLE (1 << 14)
+#define FBC_CTL_C3_IDLE (1 << 13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO_SHIFT (0)
#define FBC_COMMAND _MMIO(0x320c)
-#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_CMD_COMPRESS (1 << 0)
#define FBC_STATUS _MMIO(0x3210)
-#define FBC_STAT_COMPRESSING (1<<31)
-#define FBC_STAT_COMPRESSED (1<<30)
-#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_COMPRESSING (1 << 31)
+#define FBC_STAT_COMPRESSED (1 << 30)
+#define FBC_STAT_MODIFIED (1 << 29)
#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 _MMIO(0x3214)
-#define FBC_CTL_FENCE_DBL (0<<4)
-#define FBC_CTL_IDLE_IMM (0<<2)
-#define FBC_CTL_IDLE_FULL (1<<2)
-#define FBC_CTL_IDLE_LINE (2<<2)
-#define FBC_CTL_IDLE_DEBUG (3<<2)
-#define FBC_CTL_CPU_FENCE (1<<1)
-#define FBC_CTL_PLANE(plane) ((plane)<<0)
+#define FBC_CTL_FENCE_DBL (0 << 4)
+#define FBC_CTL_IDLE_IMM (0 << 2)
+#define FBC_CTL_IDLE_FULL (1 << 2)
+#define FBC_CTL_IDLE_LINE (2 << 2)
+#define FBC_CTL_IDLE_DEBUG (3 << 2)
+#define FBC_CTL_CPU_FENCE (1 << 1)
+#define FBC_CTL_PLANE(plane) ((plane) << 0)
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
#define FBC_LL_SIZE (1536)
#define FBC_LLC_READ_CTRL _MMIO(0x9044)
-#define FBC_LLC_FULLY_OPEN (1<<30)
+#define FBC_LLC_FULLY_OPEN (1 << 30)
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE _MMIO(0x3200)
#define DPFC_CONTROL _MMIO(0x3208)
-#define DPFC_CTL_EN (1<<31)
-#define DPFC_CTL_PLANE(plane) ((plane)<<30)
-#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
-#define DPFC_CTL_FENCE_EN (1<<29)
-#define IVB_DPFC_CTL_FENCE_EN (1<<28)
-#define DPFC_CTL_PERSISTENT_MODE (1<<25)
-#define DPFC_SR_EN (1<<10)
-#define DPFC_CTL_LIMIT_1X (0<<6)
-#define DPFC_CTL_LIMIT_2X (1<<6)
-#define DPFC_CTL_LIMIT_4X (2<<6)
+#define DPFC_CTL_EN (1 << 31)
+#define DPFC_CTL_PLANE(plane) ((plane) << 30)
+#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29)
+#define DPFC_CTL_FENCE_EN (1 << 29)
+#define IVB_DPFC_CTL_FENCE_EN (1 << 28)
+#define DPFC_CTL_PERSISTENT_MODE (1 << 25)
+#define DPFC_SR_EN (1 << 10)
+#define DPFC_CTL_LIMIT_1X (0 << 6)
+#define DPFC_CTL_LIMIT_2X (1 << 6)
+#define DPFC_CTL_LIMIT_4X (2 << 6)
#define DPFC_RECOMP_CTL _MMIO(0x320c)
-#define DPFC_RECOMP_STALL_EN (1<<27)
+#define DPFC_RECOMP_STALL_EN (1 << 27)
#define DPFC_RECOMP_STALL_WM_SHIFT (16)
#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
@@ -2922,12 +3037,12 @@ enum i915_power_well_id {
#define DPFC_STATUS2 _MMIO(0x3214)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
#define DPFC_CHICKEN _MMIO(0x3224)
-#define DPFC_HT_MODIFY (1<<31)
+#define DPFC_HT_MODIFY (1 << 31)
/* Framebuffer compression for Ironlake */
#define ILK_DPFC_CB_BASE _MMIO(0x43200)
#define ILK_DPFC_CONTROL _MMIO(0x43208)
-#define FBC_CTL_FALSE_COLOR (1<<10)
+#define FBC_CTL_FALSE_COLOR (1 << 10)
/* Bits 28:8 are reserved */
#define DPFC_RESERVED (0x1FFFFF00)
#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
@@ -2938,15 +3053,15 @@ enum i915_power_well_id {
#define BDW_FBC_COMP_SEG_MASK 0xfff
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
-#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
-#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
+#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
+#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
-#define ILK_FBC_RT_VALID (1<<0)
-#define SNB_FBC_FRONT_BUFFER (1<<1)
+#define ILK_FBC_RT_VALID (1 << 0)
+#define SNB_FBC_FRONT_BUFFER (1 << 1)
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
-#define ILK_FBCQ_DIS (1<<22)
-#define ILK_PABSTRETCH_DIS (1<<21)
+#define ILK_FBCQ_DIS (1 << 22)
+#define ILK_PABSTRETCH_DIS (1 << 21)
/*
@@ -2955,7 +3070,7 @@ enum i915_power_well_id {
* The following two registers are of type GTTMMADR
*/
#define SNB_DPFC_CTL_SA _MMIO(0x100100)
-#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define SNB_CPU_FENCE_ENABLE (1 << 29)
#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
/* Framebuffer compression for Ivybridge */
@@ -2965,8 +3080,8 @@ enum i915_power_well_id {
#define IPS_ENABLE (1 << 31)
#define MSG_FBC_REND_STATE _MMIO(0x50380)
-#define FBC_REND_NUKE (1<<2)
-#define FBC_REND_CACHE_CLEAN (1<<1)
+#define FBC_REND_NUKE (1 << 2)
+#define FBC_REND_CACHE_CLEAN (1 << 1)
/*
* GPIO regs
@@ -2979,6 +3094,10 @@ enum i915_power_well_id {
#define GPIOF _MMIO(0x5024)
#define GPIOG _MMIO(0x5028)
#define GPIOH _MMIO(0x502c)
+#define GPIOJ _MMIO(0x5034)
+#define GPIOK _MMIO(0x5038)
+#define GPIOL _MMIO(0x503c)
+#define GPIOM _MMIO(0x5040)
# define GPIO_CLOCK_DIR_MASK (1 << 0)
# define GPIO_CLOCK_DIR_IN (0 << 1)
# define GPIO_CLOCK_DIR_OUT (1 << 1)
@@ -2995,12 +3114,13 @@ enum i915_power_well_id {
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
-#define GMBUS_AKSV_SELECT (1<<11)
-#define GMBUS_RATE_100KHZ (0<<8)
-#define GMBUS_RATE_50KHZ (1<<8)
-#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
-#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
-#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_AKSV_SELECT (1 << 11)
+#define GMBUS_RATE_100KHZ (0 << 8)
+#define GMBUS_RATE_50KHZ (1 << 8)
+#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
#define GMBUS_PIN_DISABLED 0
#define GMBUS_PIN_SSC 1
#define GMBUS_PIN_VGADDC 2
@@ -3021,36 +3141,37 @@ enum i915_power_well_id {
#define GMBUS_NUM_PINS 13 /* including 0 */
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
-#define GMBUS_SW_CLR_INT (1<<31)
-#define GMBUS_SW_RDY (1<<30)
-#define GMBUS_ENT (1<<29) /* enable timeout */
-#define GMBUS_CYCLE_NONE (0<<25)
-#define GMBUS_CYCLE_WAIT (1<<25)
-#define GMBUS_CYCLE_INDEX (2<<25)
-#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_SW_CLR_INT (1 << 31)
+#define GMBUS_SW_RDY (1 << 30)
+#define GMBUS_ENT (1 << 29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0 << 25)
+#define GMBUS_CYCLE_WAIT (1 << 25)
+#define GMBUS_CYCLE_INDEX (2 << 25)
+#define GMBUS_CYCLE_STOP (4 << 25)
#define GMBUS_BYTE_COUNT_SHIFT 16
#define GMBUS_BYTE_COUNT_MAX 256U
+#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
#define GMBUS_SLAVE_INDEX_SHIFT 8
#define GMBUS_SLAVE_ADDR_SHIFT 1
-#define GMBUS_SLAVE_READ (1<<0)
-#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS_SLAVE_READ (1 << 0)
+#define GMBUS_SLAVE_WRITE (0 << 0)
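A GMBUS1 command word is the OR of a cycle type, a transfer length, the 7-bit slave address, and the direction bit. A hedged sketch of how an i2c read gets kicked off in driver context (len and slave_addr are hypothetical locals):

I915_WRITE(GMBUS1,
	   GMBUS_SW_RDY |
	   GMBUS_CYCLE_WAIT |
	   (len << GMBUS_BYTE_COUNT_SHIFT) |
	   (slave_addr << GMBUS_SLAVE_ADDR_SHIFT) |
	   GMBUS_SLAVE_READ);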
#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
-#define GMBUS_INUSE (1<<15)
-#define GMBUS_HW_WAIT_PHASE (1<<14)
-#define GMBUS_STALL_TIMEOUT (1<<13)
-#define GMBUS_INT (1<<12)
-#define GMBUS_HW_RDY (1<<11)
-#define GMBUS_SATOER (1<<10)
-#define GMBUS_ACTIVE (1<<9)
+#define GMBUS_INUSE (1 << 15)
+#define GMBUS_HW_WAIT_PHASE (1 << 14)
+#define GMBUS_STALL_TIMEOUT (1 << 13)
+#define GMBUS_INT (1 << 12)
+#define GMBUS_HW_RDY (1 << 11)
+#define GMBUS_SATOER (1 << 10)
+#define GMBUS_ACTIVE (1 << 9)
#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
-#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
-#define GMBUS_NAK_EN (1<<3)
-#define GMBUS_IDLE_EN (1<<2)
-#define GMBUS_HW_WAIT_EN (1<<1)
-#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define GMBUS_NAK_EN (1 << 3)
+#define GMBUS_IDLE_EN (1 << 2)
+#define GMBUS_HW_WAIT_EN (1 << 1)
+#define GMBUS_HW_RDY_EN (1 << 0)
#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
-#define GMBUS_2BYTE_INDEX_EN (1<<31)
+#define GMBUS_2BYTE_INDEX_EN (1 << 31)
/*
* Clock control & power management
@@ -3088,10 +3209,10 @@ enum i915_power_well_id {
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define DPLL_LOCK_VLV (1<<15)
-#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
-#define DPLL_INTEGRATED_REF_CLK_VLV (1<<13)
-#define DPLL_SSC_REF_CLK_CHV (1<<13)
+#define DPLL_LOCK_VLV (1 << 15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14)
+#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13)
+#define DPLL_SSC_REF_CLK_CHV (1 << 13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
@@ -3101,20 +3222,20 @@ enum i915_power_well_id {
#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
-#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
+#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27))
#define PHY_LDO_DELAY_0NS 0x0
#define PHY_LDO_DELAY_200NS 0x1
#define PHY_LDO_DELAY_600NS 0x2
-#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23))
-#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11))
+#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23))
+#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11))
#define PHY_CH_SU_PSR 0x1
#define PHY_CH_DEEP_PSR 0x7
-#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
-#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
-#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
-#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
+#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
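The DPIO PHY macros compute their bit positions arithmetically rather than from fixed defines; e.g. PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) is bit 2*phy + ch + 27, so PHY0/CH0 is bit 27 and PHY1/CH0 is bit 29, while the status LDO bits count downward from bit 6. Compile-time spot checks (illustrative only, not part of the header):

#include <assert.h>

#define PHY_CH_POWER_DOWN_OVRD_EN_(phy, ch) (1 << (2 * (phy) + (ch) + 27))
#define PHY_STATUS_CMN_LDO_(phy, ch)        (1 << (6 - (6 * (phy) + 3 * (ch))))

static_assert(PHY_CH_POWER_DOWN_OVRD_EN_(1, 0) == (1 << 29), "PHY1/CH0 override enable");
static_assert(PHY_STATUS_CMN_LDO_(0, 1) == (1 << 3), "PHY0/CH1 common LDO status");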
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -3135,7 +3256,7 @@ enum i915_power_well_id {
/* Ironlake */
# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
-# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9)
# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
@@ -3224,10 +3345,10 @@ enum i915_power_well_id {
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE _MMIO(0x6104)
-#define DSTATE_GFX_RESET_I830 (1<<6)
-#define DSTATE_PLL_D3_OFF (1<<3)
-#define DSTATE_GFX_CLOCK_GATING (1<<1)
-#define DSTATE_DOT_CLOCK_GATING (1<<0)
+#define DSTATE_GFX_RESET_I830 (1 << 6)
+#define DSTATE_PLL_D3_OFF (1 << 3)
+#define DSTATE_GFX_CLOCK_GATING (1 << 1)
+#define DSTATE_DOT_CLOCK_GATING (1 << 0)
#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
@@ -3344,7 +3465,7 @@ enum i915_power_well_id {
#define DEUC _MMIO(0x6214) /* CRL only */
#define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500)
-#define FW_CSPWRDWNEN (1<<15)
+#define FW_CSPWRDWNEN (1 << 15)
#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
@@ -3469,7 +3590,7 @@ enum i915_power_well_id {
#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
#define TSC1 _MMIO(0x11001)
-#define TSE (1<<0)
+#define TSE (1 << 0)
#define TR1 _MMIO(0x11006)
#define TSFS _MMIO(0x11020)
#define TSFS_SLOPE_MASK 0x0000ff00
@@ -3515,23 +3636,23 @@ enum i915_power_well_id {
#define MEMCTL_CMD_CHVID 3
#define MEMCTL_CMD_VMMOFF 4
#define MEMCTL_CMD_VMMON 5
-#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
+#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears
when command complete */
#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
#define MEMCTL_FREQ_SHIFT 8
-#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_SFCAVM (1 << 7)
#define MEMCTL_TGT_VID_MASK 0x007f
#define MEMIHYST _MMIO(0x1117c)
#define MEMINTREN _MMIO(0x11180) /* 16 bits */
-#define MEMINT_RSEXIT_EN (1<<8)
-#define MEMINT_CX_SUPR_EN (1<<7)
-#define MEMINT_CONT_BUSY_EN (1<<6)
-#define MEMINT_AVG_BUSY_EN (1<<5)
-#define MEMINT_EVAL_CHG_EN (1<<4)
-#define MEMINT_MON_IDLE_EN (1<<3)
-#define MEMINT_UP_EVAL_EN (1<<2)
-#define MEMINT_DOWN_EVAL_EN (1<<1)
-#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINT_RSEXIT_EN (1 << 8)
+#define MEMINT_CX_SUPR_EN (1 << 7)
+#define MEMINT_CONT_BUSY_EN (1 << 6)
+#define MEMINT_AVG_BUSY_EN (1 << 5)
+#define MEMINT_EVAL_CHG_EN (1 << 4)
+#define MEMINT_MON_IDLE_EN (1 << 3)
+#define MEMINT_UP_EVAL_EN (1 << 2)
+#define MEMINT_DOWN_EVAL_EN (1 << 1)
+#define MEMINT_SW_CMD_EN (1 << 0)
#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
#define MEM_RSEXIT_MASK 0xc000
#define MEM_RSEXIT_SHIFT 14
@@ -3553,26 +3674,26 @@ enum i915_power_well_id {
#define MEM_INT_STEER_SMI 2
#define MEM_INT_STEER_SCI 3
#define MEMINTRSTS _MMIO(0x11184)
-#define MEMINT_RSEXIT (1<<7)
-#define MEMINT_CONT_BUSY (1<<6)
-#define MEMINT_AVG_BUSY (1<<5)
-#define MEMINT_EVAL_CHG (1<<4)
-#define MEMINT_MON_IDLE (1<<3)
-#define MEMINT_UP_EVAL (1<<2)
-#define MEMINT_DOWN_EVAL (1<<1)
-#define MEMINT_SW_CMD (1<<0)
+#define MEMINT_RSEXIT (1 << 7)
+#define MEMINT_CONT_BUSY (1 << 6)
+#define MEMINT_AVG_BUSY (1 << 5)
+#define MEMINT_EVAL_CHG (1 << 4)
+#define MEMINT_MON_IDLE (1 << 3)
+#define MEMINT_UP_EVAL (1 << 2)
+#define MEMINT_DOWN_EVAL (1 << 1)
+#define MEMINT_SW_CMD (1 << 0)
#define MEMMODECTL _MMIO(0x11190)
-#define MEMMODE_BOOST_EN (1<<31)
+#define MEMMODE_BOOST_EN (1 << 31)
#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
#define MEMMODE_BOOST_FREQ_SHIFT 24
#define MEMMODE_IDLE_MODE_MASK 0x00030000
#define MEMMODE_IDLE_MODE_SHIFT 16
#define MEMMODE_IDLE_MODE_EVAL 0
#define MEMMODE_IDLE_MODE_CONT 1
-#define MEMMODE_HWIDLE_EN (1<<15)
-#define MEMMODE_SWMODE_EN (1<<14)
-#define MEMMODE_RCLK_GATE (1<<13)
-#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_HWIDLE_EN (1 << 15)
+#define MEMMODE_SWMODE_EN (1 << 14)
+#define MEMMODE_RCLK_GATE (1 << 13)
+#define MEMMODE_HW_UPDATE (1 << 12)
#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
#define MEMMODE_FSTART_SHIFT 8
#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
@@ -3586,8 +3707,8 @@ enum i915_power_well_id {
#define SWMEMCMD_TARVID (3 << 13)
#define SWMEMCMD_VRM_OFF (4 << 13)
#define SWMEMCMD_VRM_ON (5 << 13)
-#define CMDSTS (1<<12)
-#define SFCAVM (1<<11)
+#define CMDSTS (1 << 12)
+#define SFCAVM (1 << 11)
#define SWFREQ_MASK 0x0380 /* P0-7 */
#define SWFREQ_SHIFT 7
#define TARVID_MASK 0x001f
@@ -3596,49 +3717,49 @@ enum i915_power_well_id {
#define RCUPEI _MMIO(0x111b0)
#define RCDNEI _MMIO(0x111b4)
#define RSTDBYCTL _MMIO(0x111b8)
-#define RS1EN (1<<31)
-#define RS2EN (1<<30)
-#define RS3EN (1<<29)
-#define D3RS3EN (1<<28) /* Display D3 imlies RS3 */
-#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
-#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
-#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
-#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
-#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
-#define RSX_STATUS_MASK (7<<20)
-#define RSX_STATUS_ON (0<<20)
-#define RSX_STATUS_RC1 (1<<20)
-#define RSX_STATUS_RC1E (2<<20)
-#define RSX_STATUS_RS1 (3<<20)
-#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
-#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
-#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
-#define RSX_STATUS_RSVD2 (7<<20)
-#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
-#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
-#define JRSC (1<<17) /* rsx coupled to cpu c-state */
-#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
-#define RS1CONTSAV_MASK (3<<14)
-#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
-#define RS1CONTSAV_RSVD (1<<14)
-#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
-#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
-#define NORMSLEXLAT_MASK (3<<12)
-#define SLOW_RS123 (0<<12)
-#define SLOW_RS23 (1<<12)
-#define SLOW_RS3 (2<<12)
-#define NORMAL_RS123 (3<<12)
-#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
-#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
-#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
-#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
-#define RS_CSTATE_MASK (3<<4)
-#define RS_CSTATE_C367_RS1 (0<<4)
-#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
-#define RS_CSTATE_RSVD (2<<4)
-#define RS_CSTATE_C367_RS2 (3<<4)
-#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
-#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
+#define RS1EN (1 << 31)
+#define RS2EN (1 << 30)
+#define RS3EN (1 << 29)
+#define D3RS3EN (1 << 28) /* Display D3 implies RS3 */
+#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */
+#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7 << 20)
+#define RSX_STATUS_ON (0 << 20)
+#define RSX_STATUS_RC1 (1 << 20)
+#define RSX_STATUS_RC1E (2 << 20)
+#define RSX_STATUS_RS1 (3 << 20)
+#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7 << 20)
+#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */
+#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1 << 17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3 << 14)
+#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1 << 14)
+#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3 << 12)
+#define SLOW_RS123 (0 << 12)
+#define SLOW_RS23 (1 << 12)
+#define SLOW_RS3 (2 << 12)
+#define NORMAL_RS123 (3 << 12)
+#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */
+#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3 << 4)
+#define RS_CSTATE_C367_RS1 (0 << 4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
+#define RS_CSTATE_RSVD (2 << 4)
+#define RS_CSTATE_C367_RS2 (3 << 4)
+#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */
#define VIDCTL _MMIO(0x111c0)
#define VIDSTS _MMIO(0x111c8)
#define VIDSTART _MMIO(0x111cc) /* 8 bits */
@@ -3647,7 +3768,7 @@ enum i915_power_well_id {
#define MEMSTAT_VID_SHIFT 8
#define MEMSTAT_PSTATE_MASK 0x00f8
#define MEMSTAT_PSTATE_SHIFT 3
-#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_MON_ACTV (1 << 2)
#define MEMSTAT_SRC_CTL_MASK 0x0003
#define MEMSTAT_SRC_CTL_CORE 0
#define MEMSTAT_SRC_CTL_TRB 1
@@ -3656,7 +3777,7 @@ enum i915_power_well_id {
#define RCPREVBSYTUPAVG _MMIO(0x113b8)
#define RCPREVBSYTDNAVG _MMIO(0x113bc)
#define PMMISC _MMIO(0x11214)
-#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
+#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */
#define SDEW _MMIO(0x1124c)
#define CSIEW0 _MMIO(0x11250)
#define CSIEW1 _MMIO(0x11254)
@@ -3673,8 +3794,8 @@ enum i915_power_well_id {
#define RPPREVBSYTUPAVG _MMIO(0x113b8)
#define RPPREVBSYTDNAVG _MMIO(0x113bc)
#define ECR _MMIO(0x11600)
-#define ECR_GPFE (1<<31)
-#define ECR_IMONE (1<<30)
+#define ECR_GPFE (1 << 31)
+#define ECR_IMONE (1 << 30)
#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
#define OGW0 _MMIO(0x11608)
#define OGW1 _MMIO(0x1160c)
@@ -3781,11 +3902,11 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */
};
-#define GEN8_CTX_VALID (1<<0)
-#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
-#define GEN8_CTX_FORCE_RESTORE (1<<2)
-#define GEN8_CTX_L3LLC_COHERENT (1<<5)
-#define GEN8_CTX_PRIVILEGE (1<<8)
+#define GEN8_CTX_VALID (1 << 0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
+#define GEN8_CTX_FORCE_RESTORE (1 << 2)
+#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
+#define GEN8_CTX_PRIVILEGE (1 << 8)
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ID_SHIFT 32
@@ -3807,7 +3928,7 @@ enum {
#define OVADD _MMIO(0x30000)
#define DOVSTA _MMIO(0x30008)
-#define OC_BUF (0x3<<20)
+#define OC_BUF (0x3 << 20)
#define OGAMC5 _MMIO(0x30010)
#define OGAMC4 _MMIO(0x30014)
#define OGAMC3 _MMIO(0x30018)
@@ -3975,64 +4096,65 @@ enum {
/* VLV eDP PSR registers */
#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
-#define VLV_EDP_PSR_ENABLE (1<<0)
-#define VLV_EDP_PSR_RESET (1<<1)
-#define VLV_EDP_PSR_MODE_MASK (7<<2)
-#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
-#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
-#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
-#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
-#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
-#define VLV_EDP_PSR_DBL_FRAME (1<<10)
-#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
+#define VLV_EDP_PSR_ENABLE (1 << 0)
+#define VLV_EDP_PSR_RESET (1 << 1)
+#define VLV_EDP_PSR_MODE_MASK (7 << 2)
+#define VLV_EDP_PSR_MODE_HW_TIMER (1 << 3)
+#define VLV_EDP_PSR_MODE_SW_TIMER (1 << 2)
+#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1 << 7)
+#define VLV_EDP_PSR_ACTIVE_ENTRY (1 << 8)
+#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1 << 9)
+#define VLV_EDP_PSR_DBL_FRAME (1 << 10)
+#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff << 16)
#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
-#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
-#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
-#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
+#define VLV_EDP_PSR_SDP_FREQ_MASK (3 << 30)
+#define VLV_EDP_PSR_SDP_FREQ_ONCE (1 << 31)
+#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1 << 30)
#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
-#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
+#define VLV_EDP_PSR_LAST_STATE_MASK (7 << 3)
#define VLV_EDP_PSR_CURR_STATE_MASK 7
-#define VLV_EDP_PSR_DISABLED (0<<0)
-#define VLV_EDP_PSR_INACTIVE (1<<0)
-#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
-#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
-#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
-#define VLV_EDP_PSR_EXIT (5<<0)
-#define VLV_EDP_PSR_IN_TRANS (1<<7)
+#define VLV_EDP_PSR_DISABLED (0 << 0)
+#define VLV_EDP_PSR_INACTIVE (1 << 0)
+#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2 << 0)
+#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3 << 0)
+#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4 << 0)
+#define VLV_EDP_PSR_EXIT (5 << 0)
+#define VLV_EDP_PSR_IN_TRANS (1 << 7)
#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
/* HSW+ eDP PSR registers */
#define HSW_EDP_PSR_BASE 0x64800
#define BDW_EDP_PSR_BASE 0x6f800
#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
-#define EDP_PSR_ENABLE (1<<31)
-#define BDW_PSR_SINGLE_FRAME (1<<30)
-#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1<<29) /* SW can't modify */
-#define EDP_PSR_LINK_STANDBY (1<<27)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
+#define EDP_PSR_ENABLE (1 << 31)
+#define BDW_PSR_SINGLE_FRAME (1 << 30)
+#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
+#define EDP_PSR_LINK_STANDBY (1 << 27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25)
#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
-#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
-#define EDP_PSR_TP1_TP2_SEL (0<<11)
-#define EDP_PSR_TP1_TP3_SEL (1<<11)
-#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
-#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
-#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
-#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
-#define EDP_PSR_TP1_TIME_500us (0<<4)
-#define EDP_PSR_TP1_TIME_100us (1<<4)
-#define EDP_PSR_TP1_TIME_2500us (2<<4)
-#define EDP_PSR_TP1_TIME_0us (3<<4)
+#define EDP_PSR_SKIP_AUX_EXIT (1 << 12)
+#define EDP_PSR_TP1_TP2_SEL (0 << 11)
+#define EDP_PSR_TP1_TP3_SEL (1 << 11)
+#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */
+#define EDP_PSR_TP2_TP3_TIME_500us (0 << 8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
+#define EDP_PSR_TP1_TIME_500us (0 << 4)
+#define EDP_PSR_TP1_TIME_100us (1 << 4)
+#define EDP_PSR_TP1_TIME_2500us (2 << 4)
+#define EDP_PSR_TP1_TIME_0us (3 << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
/* Bspec claims those aren't shifted but stay at 0x64800 */
@@ -4052,55 +4174,56 @@ enum {
#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
-#define EDP_PSR_STATUS_STATE_MASK (7<<29)
-#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
-#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
-#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
-#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
-#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
-#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
-#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
-#define EDP_PSR_STATUS_LINK_MASK (3<<26)
-#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
-#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
-#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
+#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
+#define EDP_PSR_STATUS_STATE_SHIFT 29
+#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2 << 29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29)
+#define EDP_PSR_STATUS_STATE_BUFON (4 << 29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5 << 29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29)
+#define EDP_PSR_STATUS_LINK_MASK (3 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1 << 26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2 << 26)
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
#define EDP_PSR_STATUS_COUNT_SHIFT 16
#define EDP_PSR_STATUS_COUNT_MASK 0xf
-#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
-#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
-#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
-#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
-#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
+#define EDP_PSR_STATUS_AUX_ERROR (1 << 15)
+#define EDP_PSR_STATUS_AUX_SENDING (1 << 12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1 << 9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
-#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
-#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
-#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
-#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
-#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16)
-#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */
+#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
+#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
+#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16)
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
#define EDP_PSR2_CTL _MMIO(0x6f900)
-#define EDP_PSR2_ENABLE (1<<31)
-#define EDP_SU_TRACK_ENABLE (1<<30)
-#define EDP_Y_COORDINATE_VALID (1<<26) /* GLK and CNL+ */
-#define EDP_Y_COORDINATE_ENABLE (1<<25) /* GLK and CNL+ */
-#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
-#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
-#define EDP_PSR2_TP2_TIME_500 (0<<8)
-#define EDP_PSR2_TP2_TIME_100 (1<<8)
-#define EDP_PSR2_TP2_TIME_2500 (2<<8)
-#define EDP_PSR2_TP2_TIME_50 (3<<8)
-#define EDP_PSR2_TP2_TIME_MASK (3<<8)
+#define EDP_PSR2_ENABLE (1 << 31)
+#define EDP_SU_TRACK_ENABLE (1 << 30)
+#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
+#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */
+#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
+#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define EDP_PSR2_TP2_TIME_500us (0 << 8)
+#define EDP_PSR2_TP2_TIME_100us (1 << 8)
+#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
+#define EDP_PSR2_TP2_TIME_50us (3 << 8)
+#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
-#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
-#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
+#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
#define EDP_PSR2_IDLE_FRAME_MASK 0xf
#define EDP_PSR2_IDLE_FRAME_SHIFT 0
@@ -4128,7 +4251,7 @@ enum {
#define PSR_EVENT_PSR_DISABLE (1 << 0)
#define EDP_PSR2_STATUS _MMIO(0x6f940)
-#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
+#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
/* VGA port control */
@@ -4136,47 +4259,48 @@ enum {
#define PCH_ADPA _MMIO(0xe1100)
#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
-#define ADPA_DAC_ENABLE (1<<31)
+#define ADPA_DAC_ENABLE (1 << 31)
#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SELECT_MASK (1<<30)
-#define ADPA_PIPE_A_SELECT 0
-#define ADPA_PIPE_B_SELECT (1<<30)
-#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
-/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_PIPE_SEL_SHIFT 30
+#define ADPA_PIPE_SEL_MASK (1 << 30)
+#define ADPA_PIPE_SEL(pipe) ((pipe) << 30)
+#define ADPA_PIPE_SEL_SHIFT_CPT 29
+#define ADPA_PIPE_SEL_MASK_CPT (3 << 29)
+#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29)
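With the SHIFT/MASK/SEL triple, decoding the pipe from a register snapshot is a mask-and-shift, and CPT parts use the wider two-bit field one position lower (there it selects the PCH transcoder rather than the pipe). A hedged sketch in driver context (val is a hypothetical snapshot of the ADPA register):

enum pipe pipe;

if (HAS_PCH_CPT(dev_priv))
	pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
else
	pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;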
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2 << 24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1 << 23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0 << 22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1 << 22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0 << 21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1 << 21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0 << 20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1 << 20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2 << 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3 << 18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0 << 17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1 << 17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16)
+#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_VSYNC_CNTL_DISABLE (1 << 10)
#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_HSYNC_CNTL_DISABLE (1 << 11)
#define ADPA_HSYNC_CNTL_ENABLE 0
-#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
#define ADPA_VSYNC_ACTIVE_LOW 0
-#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
#define ADPA_HSYNC_ACTIVE_LOW 0
-#define ADPA_DPMS_MASK (~(3<<10))
-#define ADPA_DPMS_ON (0<<10)
-#define ADPA_DPMS_SUSPEND (1<<10)
-#define ADPA_DPMS_STANDBY (2<<10)
-#define ADPA_DPMS_OFF (3<<10)
+#define ADPA_DPMS_MASK (~(3 << 10))
+#define ADPA_DPMS_ON (0 << 10)
+#define ADPA_DPMS_SUSPEND (1 << 10)
+#define ADPA_DPMS_STANDBY (2 << 10)
+#define ADPA_DPMS_OFF (3 << 10)
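The single-bit ADPA_PIPE_B_SELECT is gone; the SHIFT/MASK/SEL(pipe) triplet that replaces it makes both encode and decode mechanical, and the same pattern is applied to SDVO, DVO, LVDS, TV and DP throughout this patch. A standalone sketch of the round trip, with the macros copied locally:

#include <stdio.h>
#include <stdint.h>

/* Local copies of the reworked pipe-select macros above. */
#define ADPA_PIPE_SEL_SHIFT     30
#define ADPA_PIPE_SEL_MASK      (1 << 30)
#define ADPA_PIPE_SEL(pipe)     ((pipe) << 30)
#define ADPA_PIPE_SEL_SHIFT_CPT 29
#define ADPA_PIPE_SEL_MASK_CPT  (3 << 29)
#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29)

int main(void)
{
	/* Encode pipe B (pipe == 1), then decode it back out. */
	uint32_t val = ADPA_PIPE_SEL(1);
	unsigned int pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
	printf("pipe = %u\n", pipe);		/* 1, i.e. pipe B */

	/* CPT widened the field to two bits at bit 29. */
	val = ADPA_PIPE_SEL_CPT(2);
	pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
	printf("cpt pipe = %u\n", pipe);	/* 2, i.e. pipe C */
	return 0;
}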
/* Hotplug control (945+ only) */
@@ -4301,9 +4425,9 @@ enum {
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_PIPE_SEL_SHIFT 30
#define SDVO_PIPE_SEL_MASK (1 << 30)
-#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
/*
@@ -4343,12 +4467,14 @@ enum {
#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
/* Gen 6 (CPT) SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+#define SDVO_PIPE_SEL_SHIFT_CPT 29
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* CHV SDVO/HDMI bits: */
-#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
+#define SDVO_PIPE_SEL_SHIFT_CHV 24
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
+#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
/* DVO port control */
@@ -4359,7 +4485,9 @@ enum {
#define _DVOC 0x61160
#define DVOC _MMIO(_DVOC)
#define DVO_ENABLE (1 << 31)
-#define DVO_PIPE_B_SELECT (1 << 30)
+#define DVO_PIPE_SEL_SHIFT 30
+#define DVO_PIPE_SEL_MASK (1 << 30)
+#define DVO_PIPE_SEL(pipe) ((pipe) << 30)
#define DVO_PIPE_STALL_UNUSED (0 << 28)
#define DVO_PIPE_STALL (1 << 28)
#define DVO_PIPE_STALL_TV (2 << 28)
@@ -4381,7 +4509,7 @@ enum {
#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
-#define DVO_PRESERVE_MASK (0x7<<24)
+#define DVO_PRESERVE_MASK (0x7 << 24)
#define DVOA_SRCDIM _MMIO(0x61124)
#define DVOB_SRCDIM _MMIO(0x61144)
#define DVOC_SRCDIM _MMIO(0x61164)
@@ -4396,9 +4524,12 @@ enum {
*/
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
-#define LVDS_PIPEB_SELECT (1 << 30)
-#define LVDS_PIPE_MASK (1 << 30)
-#define LVDS_PIPE(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT 30
+#define LVDS_PIPE_SEL_MASK (1 << 30)
+#define LVDS_PIPE_SEL(pipe) ((pipe) << 30)
+#define LVDS_PIPE_SEL_SHIFT_CPT 29
+#define LVDS_PIPE_SEL_MASK_CPT (3 << 29)
+#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -4471,6 +4602,16 @@ enum {
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+#define DRM_DIP_ENABLE (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 26)
+#define VSC_SELECT_SHIFT 26
+#define VSC_DIP_HW_HEA_DATA (0 << 26)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 26)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 26)
+#define VSC_DIP_SW_HEA_DATA (3 << 26)
+#define VDIP_ENABLE_PPS (1 << 24)
+
/* Panel power sequencing */
#define PPS_BASE 0x61200
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
@@ -4695,7 +4836,9 @@ enum {
/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
/* Sources the TV encoder input from pipe B instead of A. */
-# define TV_ENC_PIPEB_SELECT (1 << 30)
+# define TV_ENC_PIPE_SEL_SHIFT 30
+# define TV_ENC_PIPE_SEL_MASK (1 << 30)
+# define TV_ENC_PIPE_SEL(pipe) ((pipe) << 30)
/* Outputs composite video (DAC A only) */
# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
/* Outputs SVideo video (DAC B/C) */
@@ -5177,10 +5320,15 @@ enum {
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
#define DP_PORT_EN (1 << 31)
-#define DP_PIPEB_SELECT (1 << 30)
-#define DP_PIPE_MASK (1 << 30)
-#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
-#define DP_PIPE_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_SHIFT 30
+#define DP_PIPE_SEL_MASK (1 << 30)
+#define DP_PIPE_SEL(pipe) ((pipe) << 30)
+#define DP_PIPE_SEL_SHIFT_IVB 29
+#define DP_PIPE_SEL_MASK_IVB (3 << 29)
+#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define DP_PIPE_SEL_SHIFT_CHV 16
+#define DP_PIPE_SEL_MASK_CHV (3 << 16)
+#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16)
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -5287,6 +5435,13 @@ enum {
#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
+#define _DPE_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64410)
+#define _DPE_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64414)
+#define _DPE_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64418)
+#define _DPE_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6441c)
+#define _DPE_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64420)
+#define _DPE_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64424)
+
#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510)
#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514)
#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518)
@@ -5342,7 +5497,7 @@ enum {
#define _PIPEB_DATA_M_G4X 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE(x) (((x) - 1) << 25) /* default size 64 */
#define TU_SIZE_SHIFT 25
#define TU_SIZE_MASK (0x3f << 25)
@@ -5384,18 +5539,18 @@ enum {
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
-#define PIPECONF_ENABLE (1<<31)
+#define PIPECONF_ENABLE (1 << 31)
#define PIPECONF_DISABLE 0
-#define PIPECONF_DOUBLE_WIDE (1<<30)
-#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define PIPECONF_DOUBLE_WIDE (1 << 30)
+#define I965_PIPECONF_ACTIVE (1 << 30)
+#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */
+#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
-#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PIPE_LOCKED (1 << 25)
#define PIPECONF_PALETTE 0
-#define PIPECONF_GAMMA (1<<24)
-#define PIPECONF_FORCE_BORDER (1<<25)
+#define PIPECONF_GAMMA (1 << 24)
+#define PIPECONF_FORCE_BORDER (1 << 25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5414,67 +5569,67 @@ enum {
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
-#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_CXSR_DOWNCLOCK (1 << 16)
#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
-#define PIPECONF_8BPC (0<<5)
-#define PIPECONF_10BPC (1<<5)
-#define PIPECONF_6BPC (2<<5)
-#define PIPECONF_12BPC (3<<5)
-#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_8BPC (0 << 5)
+#define PIPECONF_10BPC (1 << 5)
+#define PIPECONF_6BPC (2 << 5)
+#define PIPECONF_12BPC (3 << 5)
+#define PIPECONF_DITHER_EN (1 << 4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
-#define PIPECONF_DITHER_TYPE_SP (0<<2)
-#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
-#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
-#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define PIPECONF_DITHER_TYPE_SP (0 << 2)
+#define PIPECONF_DITHER_TYPE_ST1 (1 << 2)
+#define PIPECONF_DITHER_TYPE_ST2 (2 << 2)
+#define PIPECONF_DITHER_TYPE_TEMP (3 << 2)
#define _PIPEASTAT 0x70024
-#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
-#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
-#define PIPE_CRC_DONE_ENABLE (1UL<<28)
-#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27)
-#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
-#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
-#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
-#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
-#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
-#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
-#define PERF_COUNTER_INTERRUPT_EN (1UL<<19)
-#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17)
-#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
-#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
-#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
-#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11)
-#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
-#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
-#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
-#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-#define PIPE_DPST_EVENT_STATUS (1UL<<7)
-#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
-#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
-#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3)
-#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
-#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1)
-#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define PIPE_HBLANK_INT_STATUS (1UL<<0)
-#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
+#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
+#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
+#define PIPE_CRC_DONE_ENABLE (1UL << 28)
+#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20)
+#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19)
+#define PERF_COUNTER_INTERRUPT_EN (1UL << 19)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16)
+#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15)
+#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12)
+#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11)
+#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_A_PSR_STATUS_VLV (1UL << 6)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4)
+#define PIPE_B_PSR_STATUS_VLV (1UL << 3)
+#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */
+#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_HBLANK_INT_STATUS (1UL << 0)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
@@ -5503,67 +5658,67 @@ enum {
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE (1<<27)
-#define PIPEMISC_YUV420_MODE_FULL_BLEND (1<<26)
-#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1<<11)
-#define PIPEMISC_DITHER_BPC_MASK (7<<5)
-#define PIPEMISC_DITHER_8_BPC (0<<5)
-#define PIPEMISC_DITHER_10_BPC (1<<5)
-#define PIPEMISC_DITHER_6_BPC (2<<5)
-#define PIPEMISC_DITHER_12_BPC (3<<5)
-#define PIPEMISC_DITHER_ENABLE (1<<4)
-#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
-#define PIPEMISC_DITHER_TYPE_SP (0<<2)
+#define PIPEMISC_YUV420_ENABLE (1 << 27)
+#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
+#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
+#define PIPEMISC_DITHER_8_BPC (0 << 5)
+#define PIPEMISC_DITHER_10_BPC (1 << 5)
+#define PIPEMISC_DITHER_6_BPC (2 << 5)
+#define PIPEMISC_DITHER_12_BPC (3 << 5)
+#define PIPEMISC_DITHER_ENABLE (1 << 4)
+#define PIPEMISC_DITHER_TYPE_MASK (3 << 2)
+#define PIPEMISC_DITHER_TYPE_SP (0 << 2)
#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
-#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
-#define PIPEB_HLINE_INT_EN (1<<28)
-#define PIPEB_VBLANK_INT_EN (1<<27)
-#define SPRITED_FLIP_DONE_INT_EN (1<<26)
-#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
-#define PLANEB_FLIP_DONE_INT_EN (1<<24)
-#define PIPE_PSR_INT_EN (1<<22)
-#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
-#define PIPEA_HLINE_INT_EN (1<<20)
-#define PIPEA_VBLANK_INT_EN (1<<19)
-#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
-#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
-#define PLANEA_FLIPDONE_INT_EN (1<<16)
-#define PIPEC_LINE_COMPARE_INT_EN (1<<13)
-#define PIPEC_HLINE_INT_EN (1<<12)
-#define PIPEC_VBLANK_INT_EN (1<<11)
-#define SPRITEF_FLIPDONE_INT_EN (1<<10)
-#define SPRITEE_FLIPDONE_INT_EN (1<<9)
-#define PLANEC_FLIPDONE_INT_EN (1<<8)
+#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
+#define PIPEB_HLINE_INT_EN (1 << 28)
+#define PIPEB_VBLANK_INT_EN (1 << 27)
+#define SPRITED_FLIP_DONE_INT_EN (1 << 26)
+#define SPRITEC_FLIP_DONE_INT_EN (1 << 25)
+#define PLANEB_FLIP_DONE_INT_EN (1 << 24)
+#define PIPE_PSR_INT_EN (1 << 22)
+#define PIPEA_LINE_COMPARE_INT_EN (1 << 21)
+#define PIPEA_HLINE_INT_EN (1 << 20)
+#define PIPEA_VBLANK_INT_EN (1 << 19)
+#define SPRITEB_FLIP_DONE_INT_EN (1 << 18)
+#define SPRITEA_FLIP_DONE_INT_EN (1 << 17)
+#define PLANEA_FLIPDONE_INT_EN (1 << 16)
+#define PIPEC_LINE_COMPARE_INT_EN (1 << 13)
+#define PIPEC_HLINE_INT_EN (1 << 12)
+#define PIPEC_VBLANK_INT_EN (1 << 11)
+#define SPRITEF_FLIPDONE_INT_EN (1 << 10)
+#define SPRITEE_FLIPDONE_INT_EN (1 << 9)
+#define PLANEC_FLIPDONE_INT_EN (1 << 8)
#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
-#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
-#define PLANEC_INVALID_GTT_INT_EN (1<<25)
-#define CURSORC_INVALID_GTT_INT_EN (1<<24)
-#define CURSORB_INVALID_GTT_INT_EN (1<<23)
-#define CURSORA_INVALID_GTT_INT_EN (1<<22)
-#define SPRITED_INVALID_GTT_INT_EN (1<<21)
-#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
-#define PLANEB_INVALID_GTT_INT_EN (1<<19)
-#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
-#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
-#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define SPRITEF_INVALID_GTT_INT_EN (1 << 27)
+#define SPRITEE_INVALID_GTT_INT_EN (1 << 26)
+#define PLANEC_INVALID_GTT_INT_EN (1 << 25)
+#define CURSORC_INVALID_GTT_INT_EN (1 << 24)
+#define CURSORB_INVALID_GTT_INT_EN (1 << 23)
+#define CURSORA_INVALID_GTT_INT_EN (1 << 22)
+#define SPRITED_INVALID_GTT_INT_EN (1 << 21)
+#define SPRITEC_INVALID_GTT_INT_EN (1 << 20)
+#define PLANEB_INVALID_GTT_INT_EN (1 << 19)
+#define SPRITEB_INVALID_GTT_INT_EN (1 << 18)
+#define SPRITEA_INVALID_GTT_INT_EN (1 << 17)
+#define PLANEA_INVALID_GTT_INT_EN (1 << 16)
#define DPINVGTT_EN_MASK 0xff0000
#define DPINVGTT_EN_MASK_CHV 0xfff0000
-#define SPRITEF_INVALID_GTT_STATUS (1<<11)
-#define SPRITEE_INVALID_GTT_STATUS (1<<10)
-#define PLANEC_INVALID_GTT_STATUS (1<<9)
-#define CURSORC_INVALID_GTT_STATUS (1<<8)
-#define CURSORB_INVALID_GTT_STATUS (1<<7)
-#define CURSORA_INVALID_GTT_STATUS (1<<6)
-#define SPRITED_INVALID_GTT_STATUS (1<<5)
-#define SPRITEC_INVALID_GTT_STATUS (1<<4)
-#define PLANEB_INVALID_GTT_STATUS (1<<3)
-#define SPRITEB_INVALID_GTT_STATUS (1<<2)
-#define SPRITEA_INVALID_GTT_STATUS (1<<1)
-#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define SPRITEF_INVALID_GTT_STATUS (1 << 11)
+#define SPRITEE_INVALID_GTT_STATUS (1 << 10)
+#define PLANEC_INVALID_GTT_STATUS (1 << 9)
+#define CURSORC_INVALID_GTT_STATUS (1 << 8)
+#define CURSORB_INVALID_GTT_STATUS (1 << 7)
+#define CURSORA_INVALID_GTT_STATUS (1 << 6)
+#define SPRITED_INVALID_GTT_STATUS (1 << 5)
+#define SPRITEC_INVALID_GTT_STATUS (1 << 4)
+#define PLANEB_INVALID_GTT_STATUS (1 << 3)
+#define SPRITEB_INVALID_GTT_STATUS (1 << 2)
+#define SPRITEA_INVALID_GTT_STATUS (1 << 1)
+#define PLANEA_INVALID_GTT_STATUS (1 << 0)
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
@@ -5604,149 +5759,149 @@ enum {
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff<<23)
+#define DSPFW_SR_MASK (0x1ff << 23)
#define DSPFW_CURSORB_SHIFT 16
-#define DSPFW_CURSORB_MASK (0x3f<<16)
+#define DSPFW_CURSORB_MASK (0x3f << 16)
#define DSPFW_PLANEB_SHIFT 8
-#define DSPFW_PLANEB_MASK (0x7f<<8)
-#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */
+#define DSPFW_PLANEB_MASK (0x7f << 8)
+#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
#define DSPFW_PLANEA_SHIFT 0
-#define DSPFW_PLANEA_MASK (0x7f<<0)
-#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
+#define DSPFW_PLANEA_MASK (0x7f << 0)
+#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
-#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
+#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
#define DSPFW_FBC_SR_SHIFT 28
-#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
+#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
#define DSPFW_FBC_HPLL_SR_SHIFT 24
-#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */
+#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
#define DSPFW_SPRITEB_SHIFT (16)
-#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */
-#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
+#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
+#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
-#define DSPFW_CURSORA_MASK (0x3f<<8)
+#define DSPFW_CURSORA_MASK (0x3f << 8)
#define DSPFW_PLANEC_OLD_SHIFT 0
-#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */
+#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
-#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
-#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
+#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
+#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
-#define DSPFW_HPLL_SR_EN (1<<31)
-#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_HPLL_SR_EN (1 << 31)
+#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
#define DSPFW_CURSOR_SR_SHIFT 24
-#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
#define DSPFW_HPLL_CURSOR_SHIFT 16
-#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
#define DSPFW_HPLL_SR_SHIFT 0
-#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
+#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
/* vlv/chv */
#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
#define DSPFW_SPRITEB_WM1_SHIFT 16
-#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
+#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
#define DSPFW_CURSORA_WM1_SHIFT 8
-#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
#define DSPFW_SPRITEA_WM1_SHIFT 0
-#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
+#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
#define DSPFW_PLANEB_WM1_SHIFT 24
-#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
#define DSPFW_PLANEA_WM1_SHIFT 16
-#define DSPFW_PLANEA_WM1_MASK (0xff<<16)
+#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
#define DSPFW_CURSORB_WM1_SHIFT 8
-#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
+#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
#define DSPFW_CURSOR_SR_WM1_SHIFT 0
-#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
+#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
#define DSPFW_SR_WM1_SHIFT 0
-#define DSPFW_SR_WM1_MASK (0x1ff<<0)
+#define DSPFW_SR_WM1_MASK (0x1ff << 0)
#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
#define DSPFW_SPRITED_WM1_SHIFT 24
-#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK_VLV (0xff<<16)
+#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
-#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
+#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
-#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
+#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK_VLV (0xff<<16)
+#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
-#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
+#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
+#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
-#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
+#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK_VLV (0xff<<16)
+#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
#define DSPFW_CURSORC_WM1_SHIFT 8
-#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
+#define DSPFW_CURSORC_WM1_MASK (0x3f << 8)
#define DSPFW_CURSORC_SHIFT 0
-#define DSPFW_CURSORC_MASK (0x3f<<0)
+#define DSPFW_CURSORC_MASK (0x3f << 0)
/* vlv/chv high order bits */
#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
-#define DSPFW_SPRITEF_HI_MASK (1<<23)
+#define DSPFW_SPRITEF_HI_MASK (1 << 23)
#define DSPFW_SPRITEE_HI_SHIFT 22
-#define DSPFW_SPRITEE_HI_MASK (1<<22)
+#define DSPFW_SPRITEE_HI_MASK (1 << 22)
#define DSPFW_PLANEC_HI_SHIFT 21
-#define DSPFW_PLANEC_HI_MASK (1<<21)
+#define DSPFW_PLANEC_HI_MASK (1 << 21)
#define DSPFW_SPRITED_HI_SHIFT 20
-#define DSPFW_SPRITED_HI_MASK (1<<20)
+#define DSPFW_SPRITED_HI_MASK (1 << 20)
#define DSPFW_SPRITEC_HI_SHIFT 16
-#define DSPFW_SPRITEC_HI_MASK (1<<16)
+#define DSPFW_SPRITEC_HI_MASK (1 << 16)
#define DSPFW_PLANEB_HI_SHIFT 12
-#define DSPFW_PLANEB_HI_MASK (1<<12)
+#define DSPFW_PLANEB_HI_MASK (1 << 12)
#define DSPFW_SPRITEB_HI_SHIFT 8
-#define DSPFW_SPRITEB_HI_MASK (1<<8)
+#define DSPFW_SPRITEB_HI_MASK (1 << 8)
#define DSPFW_SPRITEA_HI_SHIFT 4
-#define DSPFW_SPRITEA_HI_MASK (1<<4)
+#define DSPFW_SPRITEA_HI_MASK (1 << 4)
#define DSPFW_PLANEA_HI_SHIFT 0
-#define DSPFW_PLANEA_HI_MASK (1<<0)
+#define DSPFW_PLANEA_HI_MASK (1 << 0)
#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
-#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
+#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
-#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22)
+#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
#define DSPFW_PLANEC_WM1_HI_SHIFT 21
-#define DSPFW_PLANEC_WM1_HI_MASK (1<<21)
+#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
#define DSPFW_SPRITED_WM1_HI_SHIFT 20
-#define DSPFW_SPRITED_WM1_HI_MASK (1<<20)
+#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
-#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16)
+#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
#define DSPFW_PLANEB_WM1_HI_SHIFT 12
-#define DSPFW_PLANEB_WM1_HI_MASK (1<<12)
+#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
-#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8)
+#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
-#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4)
+#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
#define DSPFW_PLANEA_WM1_HI_SHIFT 0
-#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
+#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
/* drain latency register values */
#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
+#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
#define DDL_PLANE_SHIFT 0
-#define DDL_PRECISION_HIGH (1<<7)
-#define DDL_PRECISION_LOW (0<<7)
+#define DDL_PRECISION_HIGH (1 << 7)
+#define DDL_PRECISION_LOW (0 << 7)
#define DRAIN_LATENCY_MASK 0x7f
#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
-#define CBR_PND_DEADLINE_DISABLE (1<<31)
-#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
+#define CBR_PND_DEADLINE_DISABLE (1 << 31)
+#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define CBR_DPLLBMD_PIPE(pipe) (1<<(7+(pipe)*11)) /* pipes B and C */
+#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
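The CBR_DPLLBMD_PIPE() arithmetic puts pipe B (pipe == 1) at bit 18 and pipe C (pipe == 2) at bit 29, matching the "pipes B and C" comment. A tiny standalone check of the positions it yields:

#include <assert.h>

/* Local copy of the macro above, to check the bit positions. */
#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */

int main(void)
{
	assert(CBR_DPLLBMD_PIPE(1) == (1 << 18));	/* pipe B */
	assert(CBR_DPLLBMD_PIPE(2) == (1 << 29));	/* pipe C */
	return 0;
}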
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -5818,32 +5973,32 @@ enum {
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK _MMIO(0x45100)
-#define WM0_PIPE_PLANE_MASK (0xffff<<16)
+#define WM0_PIPE_PLANE_MASK (0xffff << 16)
#define WM0_PIPE_PLANE_SHIFT 16
-#define WM0_PIPE_SPRITE_MASK (0xff<<8)
+#define WM0_PIPE_SPRITE_MASK (0xff << 8)
#define WM0_PIPE_SPRITE_SHIFT 8
#define WM0_PIPE_CURSOR_MASK (0xff)
#define WM0_PIPEB_ILK _MMIO(0x45104)
#define WM0_PIPEC_IVB _MMIO(0x45200)
#define WM1_LP_ILK _MMIO(0x45108)
-#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_SR_EN (1 << 31)
#define WM1_LP_LATENCY_SHIFT 24
-#define WM1_LP_LATENCY_MASK (0x7f<<24)
-#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_LATENCY_MASK (0x7f << 24)
+#define WM1_LP_FBC_MASK (0xf << 20)
#define WM1_LP_FBC_SHIFT 20
#define WM1_LP_FBC_SHIFT_BDW 19
-#define WM1_LP_SR_MASK (0x7ff<<8)
+#define WM1_LP_SR_MASK (0x7ff << 8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0xff)
#define WM2_LP_ILK _MMIO(0x4510c)
-#define WM2_LP_EN (1<<31)
+#define WM2_LP_EN (1 << 31)
#define WM3_LP_ILK _MMIO(0x45110)
-#define WM3_LP_EN (1<<31)
+#define WM3_LP_EN (1 << 31)
#define WM1S_LP_ILK _MMIO(0x45120)
#define WM2S_LP_IVB _MMIO(0x45124)
#define WM3S_LP_IVB _MMIO(0x45128)
-#define WM1S_LP_EN (1<<31)
+#define WM1S_LP_EN (1 << 31)
#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
(WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
@@ -5900,8 +6055,7 @@ enum {
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_SHIFT 28
-#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
-#define CURSOR_PIPE_CSC_ENABLE (1<<24)
+#define CURSOR_STRIDE(x) ((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
@@ -5910,18 +6064,21 @@ enum {
#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
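CURSOR_STRIDE() leans on ffs(): for the power-of-two strides in its comment, ffs(x) - 9 collapses 256/512/1k/2k into a 2-bit field at bits 29:28. A standalone check of that mapping (ffs() comes from <strings.h> here; in the kernel it is provided by the bitops headers):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Local copies of the cursor stride macros above. */
#define CURSOR_STRIDE_SHIFT	28
#define CURSOR_STRIDE(x)	((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */

int main(void)
{
	/* ffs() returns the 1-based index of the lowest set bit, so the
	 * power-of-two strides map to 256 -> 0, 512 -> 1, 1024 -> 2, 2048 -> 3.
	 */
	int strides[] = { 256, 512, 1024, 2048 };
	for (int i = 0; i < 4; i++)
		printf("stride %4d -> field %d (reg 0x%08x)\n", strides[i],
		       ffs(strides[i]) - 9, (unsigned)CURSOR_STRIDE(strides[i]));
	return 0;
}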
/* New style CUR*CNTR flags */
-#define CURSOR_MODE 0x27
-#define CURSOR_MODE_DISABLE 0x00
-#define CURSOR_MODE_128_32B_AX 0x02
-#define CURSOR_MODE_256_32B_AX 0x03
-#define CURSOR_MODE_64_32B_AX 0x07
-#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
-#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
-#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_MODE 0x27
+#define MCURSOR_MODE_DISABLE 0x00
+#define MCURSOR_MODE_128_32B_AX 0x02
+#define MCURSOR_MODE_256_32B_AX 0x03
+#define MCURSOR_MODE_64_32B_AX 0x07
+#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
+#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
+#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28)
+#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define CURSOR_ROTATE_180 (1<<15)
-#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
+#define MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define MCURSOR_ROTATE_180 (1 << 15)
+#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
@@ -5958,41 +6115,41 @@ enum {
/* Display A control */
#define _DSPACNTR 0x70180
-#define DISPLAY_PLANE_ENABLE (1<<31)
+#define DISPLAY_PLANE_ENABLE (1 << 31)
#define DISPLAY_PLANE_DISABLE 0
-#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_ENABLE (1 << 30)
#define DISPPLANE_GAMMA_DISABLE 0
-#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
-#define DISPPLANE_YUV422 (0x0<<26)
-#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_BGRA555 (0x3<<26)
-#define DISPPLANE_BGRX555 (0x4<<26)
-#define DISPPLANE_BGRX565 (0x5<<26)
-#define DISPPLANE_BGRX888 (0x6<<26)
-#define DISPPLANE_BGRA888 (0x7<<26)
-#define DISPPLANE_RGBX101010 (0x8<<26)
-#define DISPPLANE_RGBA101010 (0x9<<26)
-#define DISPPLANE_BGRX101010 (0xa<<26)
-#define DISPPLANE_RGBX161616 (0xc<<26)
-#define DISPPLANE_RGBX888 (0xe<<26)
-#define DISPPLANE_RGBA888 (0xf<<26)
-#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
+#define DISPPLANE_YUV422 (0x0 << 26)
+#define DISPPLANE_8BPP (0x2 << 26)
+#define DISPPLANE_BGRA555 (0x3 << 26)
+#define DISPPLANE_BGRX555 (0x4 << 26)
+#define DISPPLANE_BGRX565 (0x5 << 26)
+#define DISPPLANE_BGRX888 (0x6 << 26)
+#define DISPPLANE_BGRA888 (0x7 << 26)
+#define DISPPLANE_RGBX101010 (0x8 << 26)
+#define DISPPLANE_RGBA101010 (0x9 << 26)
+#define DISPPLANE_BGRX101010 (0xa << 26)
+#define DISPPLANE_RGBX161616 (0xc << 26)
+#define DISPPLANE_RGBX888 (0xe << 26)
+#define DISPPLANE_RGBA888 (0xf << 26)
+#define DISPPLANE_STEREO_ENABLE (1 << 25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
+#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
-#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
-#define DISPPLANE_SEL_PIPE(pipe) ((pipe)<<DISPPLANE_SEL_PIPE_SHIFT)
-#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
#define DISPPLANE_SRC_KEY_DISABLE 0
-#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_LINE_DOUBLE (1 << 20)
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
-#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */
-#define DISPPLANE_ROTATE_180 (1<<15)
-#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
-#define DISPPLANE_TILED (1<<10)
-#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */
+#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
+#define DISPPLANE_ALPHA_PREMULTIPLY (1 << 16) /* CHV pipe B */
+#define DISPPLANE_ROTATE_180 (1 << 15)
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */
+#define DISPPLANE_TILED (1 << 10)
+#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
@@ -6015,15 +6172,15 @@ enum {
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
-#define CHV_BLEND_LEGACY (0<<30)
-#define CHV_BLEND_ANDROID (1<<30)
-#define CHV_BLEND_MPO (2<<30)
-#define CHV_BLEND_MASK (3<<30)
+#define CHV_BLEND_LEGACY (0 << 30)
+#define CHV_BLEND_ANDROID (1 << 30)
+#define CHV_BLEND_MPO (2 << 30)
+#define CHV_BLEND_MASK (3 << 30)
#define _CHV_CANVAS_A 0x60a04
#define _PRIMPOS_A 0x60a08
#define _PRIMSIZE_A 0x60a0c
#define _PRIMCNSTALPHA_A 0x60a10
-#define PRIM_CONST_ALPHA_ENABLE (1<<31)
+#define PRIM_CONST_ALPHA_ENABLE (1 << 31)
#define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A)
#define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
@@ -6033,8 +6190,8 @@ enum {
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
-#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
-#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
+#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
/*
* VBIOS flags
@@ -6064,7 +6221,7 @@ enum {
/* Display B control */
#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
-#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
@@ -6079,27 +6236,27 @@ enum {
/* Sprite A control */
#define _DVSACNTR 0x72180
-#define DVS_ENABLE (1<<31)
-#define DVS_GAMMA_ENABLE (1<<30)
-#define DVS_YUV_RANGE_CORRECTION_DISABLE (1<<27)
-#define DVS_PIXFORMAT_MASK (3<<25)
-#define DVS_FORMAT_YUV422 (0<<25)
-#define DVS_FORMAT_RGBX101010 (1<<25)
-#define DVS_FORMAT_RGBX888 (2<<25)
-#define DVS_FORMAT_RGBX161616 (3<<25)
-#define DVS_PIPE_CSC_ENABLE (1<<24)
-#define DVS_SOURCE_KEY (1<<22)
-#define DVS_RGB_ORDER_XBGR (1<<20)
-#define DVS_YUV_FORMAT_BT709 (1<<18)
-#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
-#define DVS_YUV_ORDER_YUYV (0<<16)
-#define DVS_YUV_ORDER_UYVY (1<<16)
-#define DVS_YUV_ORDER_YVYU (2<<16)
-#define DVS_YUV_ORDER_VYUY (3<<16)
-#define DVS_ROTATE_180 (1<<15)
-#define DVS_DEST_KEY (1<<2)
-#define DVS_TRICKLE_FEED_DISABLE (1<<14)
-#define DVS_TILED (1<<10)
+#define DVS_ENABLE (1 << 31)
+#define DVS_GAMMA_ENABLE (1 << 30)
+#define DVS_YUV_RANGE_CORRECTION_DISABLE (1 << 27)
+#define DVS_PIXFORMAT_MASK (3 << 25)
+#define DVS_FORMAT_YUV422 (0 << 25)
+#define DVS_FORMAT_RGBX101010 (1 << 25)
+#define DVS_FORMAT_RGBX888 (2 << 25)
+#define DVS_FORMAT_RGBX161616 (3 << 25)
+#define DVS_PIPE_CSC_ENABLE (1 << 24)
+#define DVS_SOURCE_KEY (1 << 22)
+#define DVS_RGB_ORDER_XBGR (1 << 20)
+#define DVS_YUV_FORMAT_BT709 (1 << 18)
+#define DVS_YUV_BYTE_ORDER_MASK (3 << 16)
+#define DVS_YUV_ORDER_YUYV (0 << 16)
+#define DVS_YUV_ORDER_UYVY (1 << 16)
+#define DVS_YUV_ORDER_YVYU (2 << 16)
+#define DVS_YUV_ORDER_VYUY (3 << 16)
+#define DVS_ROTATE_180 (1 << 15)
+#define DVS_TRICKLE_FEED_DISABLE (1 << 14)
+#define DVS_TILED (1 << 10)
+#define DVS_DEST_KEY (1 << 2)
#define _DVSALINOFF 0x72184
#define _DVSASTRIDE 0x72188
#define _DVSAPOS 0x7218c
@@ -6111,13 +6268,13 @@ enum {
#define _DVSATILEOFF 0x721a4
#define _DVSASURFLIVE 0x721ac
#define _DVSASCALE 0x72204
-#define DVS_SCALE_ENABLE (1<<31)
-#define DVS_FILTER_MASK (3<<29)
-#define DVS_FILTER_MEDIUM (0<<29)
-#define DVS_FILTER_ENHANCING (1<<29)
-#define DVS_FILTER_SOFTENING (2<<29)
-#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define DVS_SCALE_ENABLE (1 << 31)
+#define DVS_FILTER_MASK (3 << 29)
+#define DVS_FILTER_MEDIUM (0 << 29)
+#define DVS_FILTER_ENHANCING (1 << 29)
+#define DVS_FILTER_SOFTENING (2 << 29)
+#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _DVSAGAMC 0x72300
#define _DVSBCNTR 0x73180
@@ -6148,31 +6305,31 @@ enum {
#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
-#define SPRITE_ENABLE (1<<31)
-#define SPRITE_GAMMA_ENABLE (1<<30)
-#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1<<28)
-#define SPRITE_PIXFORMAT_MASK (7<<25)
-#define SPRITE_FORMAT_YUV422 (0<<25)
-#define SPRITE_FORMAT_RGBX101010 (1<<25)
-#define SPRITE_FORMAT_RGBX888 (2<<25)
-#define SPRITE_FORMAT_RGBX161616 (3<<25)
-#define SPRITE_FORMAT_YUV444 (4<<25)
-#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
-#define SPRITE_PIPE_CSC_ENABLE (1<<24)
-#define SPRITE_SOURCE_KEY (1<<22)
-#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
-#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
-#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
-#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
-#define SPRITE_YUV_ORDER_YUYV (0<<16)
-#define SPRITE_YUV_ORDER_UYVY (1<<16)
-#define SPRITE_YUV_ORDER_YVYU (2<<16)
-#define SPRITE_YUV_ORDER_VYUY (3<<16)
-#define SPRITE_ROTATE_180 (1<<15)
-#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
-#define SPRITE_INT_GAMMA_ENABLE (1<<13)
-#define SPRITE_TILED (1<<10)
-#define SPRITE_DEST_KEY (1<<2)
+#define SPRITE_ENABLE (1 << 31)
+#define SPRITE_GAMMA_ENABLE (1 << 30)
+#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+#define SPRITE_PIXFORMAT_MASK (7 << 25)
+#define SPRITE_FORMAT_YUV422 (0 << 25)
+#define SPRITE_FORMAT_RGBX101010 (1 << 25)
+#define SPRITE_FORMAT_RGBX888 (2 << 25)
+#define SPRITE_FORMAT_RGBX161616 (3 << 25)
+#define SPRITE_FORMAT_YUV444 (4 << 25)
+#define SPRITE_FORMAT_XR_BGR101010 (5 << 25) /* Extended range */
+#define SPRITE_PIPE_CSC_ENABLE (1 << 24)
+#define SPRITE_SOURCE_KEY (1 << 22)
+#define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19)
+#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SPRITE_YUV_ORDER_YUYV (0 << 16)
+#define SPRITE_YUV_ORDER_UYVY (1 << 16)
+#define SPRITE_YUV_ORDER_YVYU (2 << 16)
+#define SPRITE_YUV_ORDER_VYUY (3 << 16)
+#define SPRITE_ROTATE_180 (1 << 15)
+#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14)
+#define SPRITE_INT_GAMMA_ENABLE (1 << 13)
+#define SPRITE_TILED (1 << 10)
+#define SPRITE_DEST_KEY (1 << 2)
#define _SPRA_LINOFF 0x70284
#define _SPRA_STRIDE 0x70288
#define _SPRA_POS 0x7028c
@@ -6185,13 +6342,13 @@ enum {
#define _SPRA_OFFSET 0x702a4
#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
-#define SPRITE_SCALE_ENABLE (1<<31)
-#define SPRITE_FILTER_MASK (3<<29)
-#define SPRITE_FILTER_MEDIUM (0<<29)
-#define SPRITE_FILTER_ENHANCING (1<<29)
-#define SPRITE_FILTER_SOFTENING (2<<29)
-#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define SPRITE_SCALE_ENABLE (1 << 31)
+#define SPRITE_FILTER_MASK (3 << 29)
+#define SPRITE_FILTER_MEDIUM (0 << 29)
+#define SPRITE_FILTER_ENHANCING (1 << 29)
+#define SPRITE_FILTER_SOFTENING (2 << 29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _SPRA_GAMC 0x70400
#define _SPRB_CTL 0x71280
@@ -6225,28 +6382,28 @@ enum {
#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
-#define SP_ENABLE (1<<31)
-#define SP_GAMMA_ENABLE (1<<30)
-#define SP_PIXFORMAT_MASK (0xf<<26)
-#define SP_FORMAT_YUV422 (0<<26)
-#define SP_FORMAT_BGR565 (5<<26)
-#define SP_FORMAT_BGRX8888 (6<<26)
-#define SP_FORMAT_BGRA8888 (7<<26)
-#define SP_FORMAT_RGBX1010102 (8<<26)
-#define SP_FORMAT_RGBA1010102 (9<<26)
-#define SP_FORMAT_RGBX8888 (0xe<<26)
-#define SP_FORMAT_RGBA8888 (0xf<<26)
-#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
-#define SP_SOURCE_KEY (1<<22)
-#define SP_YUV_FORMAT_BT709 (1<<18)
-#define SP_YUV_BYTE_ORDER_MASK (3<<16)
-#define SP_YUV_ORDER_YUYV (0<<16)
-#define SP_YUV_ORDER_UYVY (1<<16)
-#define SP_YUV_ORDER_YVYU (2<<16)
-#define SP_YUV_ORDER_VYUY (3<<16)
-#define SP_ROTATE_180 (1<<15)
-#define SP_TILED (1<<10)
-#define SP_MIRROR (1<<8) /* CHV pipe B */
+#define SP_ENABLE (1 << 31)
+#define SP_GAMMA_ENABLE (1 << 30)
+#define SP_PIXFORMAT_MASK (0xf << 26)
+#define SP_FORMAT_YUV422 (0 << 26)
+#define SP_FORMAT_BGR565 (5 << 26)
+#define SP_FORMAT_BGRX8888 (6 << 26)
+#define SP_FORMAT_BGRA8888 (7 << 26)
+#define SP_FORMAT_RGBX1010102 (8 << 26)
+#define SP_FORMAT_RGBA1010102 (9 << 26)
+#define SP_FORMAT_RGBX8888 (0xe << 26)
+#define SP_FORMAT_RGBA8888 (0xf << 26)
+#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */
+#define SP_SOURCE_KEY (1 << 22)
+#define SP_YUV_FORMAT_BT709 (1 << 18)
+#define SP_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SP_YUV_ORDER_YUYV (0 << 16)
+#define SP_YUV_ORDER_UYVY (1 << 16)
+#define SP_YUV_ORDER_YVYU (2 << 16)
+#define SP_YUV_ORDER_VYUY (3 << 16)
+#define SP_ROTATE_180 (1 << 15)
+#define SP_TILED (1 << 10)
+#define SP_MIRROR (1 << 8) /* CHV pipe B */
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
@@ -6257,7 +6414,7 @@ enum {
#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
-#define SP_CONST_ALPHA_ENABLE (1<<31)
+#define SP_CONST_ALPHA_ENABLE (1 << 31)
#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */
#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */
@@ -6349,40 +6506,40 @@ enum {
* correctly map to the same formats in ICL, as long as bit 23 is set to 0
*/
#define PLANE_CTL_FORMAT_MASK (0xf << 24)
-#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
-#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
-#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
-#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
-#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
-#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
-#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
-#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
+#define PLANE_CTL_FORMAT_YUV422 (0 << 24)
+#define PLANE_CTL_FORMAT_NV12 (1 << 24)
+#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24)
+#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24)
+#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
+#define PLANE_CTL_FORMAT_AYUV (8 << 24)
+#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
+#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
-#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
-#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
+#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21)
+#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
#define PLANE_CTL_ORDER_BGRX (0 << 20)
#define PLANE_CTL_ORDER_RGBX (1 << 20)
#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
-#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
-#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
-#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
-#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
+#define PLANE_CTL_YUV422_YUYV (0 << 16)
+#define PLANE_CTL_YUV422_UYVY (1 << 16)
+#define PLANE_CTL_YUV422_YVYU (2 << 16)
+#define PLANE_CTL_YUV422_VYUY (3 << 16)
#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
-#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
-#define PLANE_CTL_TILED_X ( 1 << 10)
-#define PLANE_CTL_TILED_Y ( 4 << 10)
-#define PLANE_CTL_TILED_YF ( 5 << 10)
-#define PLANE_CTL_FLIP_HORIZONTAL ( 1 << 8)
+#define PLANE_CTL_TILED_LINEAR (0 << 10)
+#define PLANE_CTL_TILED_X (1 << 10)
+#define PLANE_CTL_TILED_Y (4 << 10)
+#define PLANE_CTL_TILED_YF (5 << 10)
+#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8)
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
-#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
-#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
-#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
+#define PLANE_CTL_ALPHA_DISABLE (0 << 4)
+#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4)
+#define PLANE_CTL_ALPHA_HW_PREMULTIPLY (3 << 4)
#define PLANE_CTL_ROTATE_MASK 0x3
#define PLANE_CTL_ROTATE_0 0x0
#define PLANE_CTL_ROTATE_90 0x1
@@ -6610,7 +6767,7 @@ enum {
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define FDI_PLL_FREQ_CTL _MMIO(0x46030)
-#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
+#define FDI_PLL_FREQ_CHANGE_REQUEST (1 << 24)
#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
@@ -6659,14 +6816,14 @@ enum {
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
-#define PF_ENABLE (1<<31)
-#define PF_PIPE_SEL_MASK_IVB (3<<29)
-#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
-#define PF_FILTER_MASK (3<<23)
-#define PF_FILTER_PROGRAMMED (0<<23)
-#define PF_FILTER_MED_3x3 (1<<23)
-#define PF_FILTER_EDGE_ENHANCE (2<<23)
-#define PF_FILTER_EDGE_SOFTEN (3<<23)
+#define PF_ENABLE (1 << 31)
+#define PF_PIPE_SEL_MASK_IVB (3 << 29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define PF_FILTER_MASK (3 << 23)
+#define PF_FILTER_PROGRAMMED (0 << 23)
+#define PF_FILTER_MED_3x3 (1 << 23)
+#define PF_FILTER_EDGE_ENHANCE (2 << 23)
+#define PF_FILTER_EDGE_SOFTEN (3 << 23)
#define _PFA_WIN_SZ 0x68074
#define _PFB_WIN_SZ 0x68874
#define _PFA_WIN_POS 0x68070
@@ -6684,7 +6841,7 @@ enum {
#define _PSA_CTL 0x68180
#define _PSB_CTL 0x68980
-#define PS_ENABLE (1<<31)
+#define PS_ENABLE (1 << 31)
#define _PSA_WIN_SZ 0x68174
#define _PSB_WIN_SZ 0x68974
#define _PSA_WIN_POS 0x68170
@@ -6769,6 +6926,10 @@ enum {
#define _PS_VPHASE_1B 0x68988
#define _PS_VPHASE_2B 0x68A88
#define _PS_VPHASE_1C 0x69188
+#define PS_Y_PHASE(x) ((x) << 16)
+#define PS_UV_RGB_PHASE(x) ((x) << 0)
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
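Reading the u2.13 annotation as 2 integer plus 13 fractional bits stored in bits 15:1 (with bit 0 as the trip bit), a fractional scaler phase converts as sketched below. This placement of the fixed point is an assumption, not stated in the patch:

#include <stdio.h>
#include <stdint.h>

/* Local copies of the new scaler-phase macros above. */
#define PS_Y_PHASE(x)		((x) << 16)
#define PS_UV_RGB_PHASE(x)	((x) << 0)
#define PS_PHASE_MASK		(0x7fff << 1)	/* u2.13 */
#define PS_PHASE_TRIP		(1 << 0)

/* Assumption: u2.13 means 13 fractional bits, so scale by 2^13 and
 * shift into bits 15:1 of each 16-bit half.
 */
static uint32_t phase_u2_13(double phase)
{
	return ((uint32_t)(phase * (1 << 13)) << 1) & PS_PHASE_MASK;
}

int main(void)
{
	/* A half-pixel phase for both the Y and UV/RGB halves. */
	uint32_t val = PS_Y_PHASE(phase_u2_13(0.5)) |
		       PS_UV_RGB_PHASE(phase_u2_13(0.5));
	printf("phase reg = 0x%08x\n", val);	/* 0x20002000 */
	return 0;
}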
#define _PS_HPHASE_1A 0x68194
#define _PS_HPHASE_2A 0x68294
@@ -6782,7 +6943,7 @@ enum {
#define _PS_ECC_STAT_2B 0x68AD0
#define _PS_ECC_STAT_1C 0x691D0
-#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
_ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
_ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
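_ID() now delegates to _PICK_EVEN(); assuming _PICK_EVEN keeps the arithmetic of the _ID body it replaces, it interpolates from instance 0's address by the instance-0/instance-1 stride. A standalone sketch using the _PS_VPHASE addresses above:

#include <stdio.h>

/* Assumed to match the removed _ID body: a + id * (b - a). */
#define _PICK_EVEN(id, a, b)	((a) + (id) * ((b) - (a)))

/* Addresses copied from the _PS_VPHASE defines above. */
#define _PS_VPHASE_1B	0x68988
#define _PS_VPHASE_2B	0x68A88

int main(void)
{
	/* id 0 -> first scaler, id 1 -> second scaler on the same pipe. */
	printf("scaler 0: 0x%05x\n", _PICK_EVEN(0, _PS_VPHASE_1B, _PS_VPHASE_2B));
	printf("scaler 1: 0x%05x\n", _PICK_EVEN(1, _PS_VPHASE_1B, _PS_VPHASE_2B));
	return 0;
}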
@@ -6863,37 +7024,37 @@ enum {
#define DE_PIPEB_CRC_DONE (1 << 10)
#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
#define DE_PIPEA_VBLANK (1 << 7)
-#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
+#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8 * (pipe)))
#define DE_PIPEA_EVEN_FIELD (1 << 6)
#define DE_PIPEA_ODD_FIELD (1 << 5)
#define DE_PIPEA_LINE_COMPARE (1 << 4)
#define DE_PIPEA_VSYNC (1 << 3)
#define DE_PIPEA_CRC_DONE (1 << 2)
-#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
-#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
+#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
/* More Ivybridge lolz */
-#define DE_ERR_INT_IVB (1<<30)
-#define DE_GSE_IVB (1<<29)
-#define DE_PCH_EVENT_IVB (1<<28)
-#define DE_DP_A_HOTPLUG_IVB (1<<27)
-#define DE_AUX_CHANNEL_A_IVB (1<<26)
-#define DE_EDP_PSR_INT_HSW (1<<19)
-#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
-#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
-#define DE_PIPEC_VBLANK_IVB (1<<10)
-#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
-#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
-#define DE_PIPEB_VBLANK_IVB (1<<5)
-#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
-#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
-#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
-#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define DE_ERR_INT_IVB (1 << 30)
+#define DE_GSE_IVB (1 << 29)
+#define DE_PCH_EVENT_IVB (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB (1 << 26)
+#define DE_EDP_PSR_INT_HSW (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB (1 << 13)
+#define DE_PIPEC_VBLANK_IVB (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB (1 << 8)
+#define DE_PIPEB_VBLANK_IVB (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB (1 << 0)
#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
-#define MASTER_INTERRUPT_ENABLE (1<<31)
+#define MASTER_INTERRUPT_ENABLE (1 << 31)
#define DEISR _MMIO(0x44000)
#define DEIMR _MMIO(0x44004)
@@ -6906,37 +7067,37 @@ enum {
#define GTIER _MMIO(0x4401c)
#define GEN8_MASTER_IRQ _MMIO(0x44200)
-#define GEN8_MASTER_IRQ_CONTROL (1<<31)
-#define GEN8_PCU_IRQ (1<<30)
-#define GEN8_DE_PCH_IRQ (1<<23)
-#define GEN8_DE_MISC_IRQ (1<<22)
-#define GEN8_DE_PORT_IRQ (1<<20)
-#define GEN8_DE_PIPE_C_IRQ (1<<18)
-#define GEN8_DE_PIPE_B_IRQ (1<<17)
-#define GEN8_DE_PIPE_A_IRQ (1<<16)
-#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
-#define GEN8_GT_VECS_IRQ (1<<6)
-#define GEN8_GT_GUC_IRQ (1<<5)
-#define GEN8_GT_PM_IRQ (1<<4)
-#define GEN8_GT_VCS2_IRQ (1<<3)
-#define GEN8_GT_VCS1_IRQ (1<<2)
-#define GEN8_GT_BCS_IRQ (1<<1)
-#define GEN8_GT_RCS_IRQ (1<<0)
+#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
+#define GEN8_PCU_IRQ (1 << 30)
+#define GEN8_DE_PCH_IRQ (1 << 23)
+#define GEN8_DE_MISC_IRQ (1 << 22)
+#define GEN8_DE_PORT_IRQ (1 << 20)
+#define GEN8_DE_PIPE_C_IRQ (1 << 18)
+#define GEN8_DE_PIPE_B_IRQ (1 << 17)
+#define GEN8_DE_PIPE_A_IRQ (1 << 16)
+#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe)))
+#define GEN8_GT_VECS_IRQ (1 << 6)
+#define GEN8_GT_GUC_IRQ (1 << 5)
+#define GEN8_GT_PM_IRQ (1 << 4)
+#define GEN8_GT_VCS2_IRQ (1 << 3)
+#define GEN8_GT_VCS1_IRQ (1 << 2)
+#define GEN8_GT_BCS_IRQ (1 << 1)
+#define GEN8_GT_RCS_IRQ (1 << 0)
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
-#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31)
-#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30)
-#define GEN9_GUC_DISPLAY_EVENT (1<<29)
-#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28)
-#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27)
-#define GEN9_GUC_DB_RING_EVENT (1<<26)
-#define GEN9_GUC_DMA_DONE_EVENT (1<<25)
-#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24)
-#define GEN9_GUC_NOTIFICATION_EVENT (1<<23)
+#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31)
+#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30)
+#define GEN9_GUC_DISPLAY_EVENT (1 << 29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28)
+#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27)
+#define GEN9_GUC_DB_RING_EVENT (1 << 26)
+#define GEN9_GUC_DMA_DONE_EVENT (1 << 25)
+#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24)
+#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23)
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
@@ -6985,6 +7146,7 @@ enum {
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define ICL_AUX_CHANNEL_E (1 << 29)
#define CNL_AUX_CHANNEL_F (1 << 28)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
@@ -7011,9 +7173,16 @@ enum {
#define GEN8_PCU_IIR _MMIO(0x444e8)
#define GEN8_PCU_IER _MMIO(0x444ec)
+#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
+#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
+#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
+#define GEN11_GU_MISC_IER _MMIO(0x444fc)
+#define GEN11_GU_MISC_GSE (1 << 27)
+
#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
#define GEN11_MASTER_IRQ (1 << 31)
#define GEN11_PCU_IRQ (1 << 30)
+#define GEN11_GU_MISC_IRQ (1 << 29)
#define GEN11_DISPLAY_IRQ (1 << 16)
#define GEN11_GT_DW_IRQ(x) (1 << (x))
#define GEN11_GT_DW1_IRQ (1 << 1)
@@ -7024,11 +7193,40 @@ enum {
#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
#define GEN11_DE_PCH_IRQ (1 << 23)
#define GEN11_DE_MISC_IRQ (1 << 22)
+#define GEN11_DE_HPD_IRQ (1 << 21)
#define GEN11_DE_PORT_IRQ (1 << 20)
#define GEN11_DE_PIPE_C (1 << 18)
#define GEN11_DE_PIPE_B (1 << 17)
#define GEN11_DE_PIPE_A (1 << 16)
+#define GEN11_DE_HPD_ISR _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR _MMIO(0x44478)
+#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN11_TC4_HOTPLUG (1 << 19)
+#define GEN11_TC3_HOTPLUG (1 << 18)
+#define GEN11_TC2_HOTPLUG (1 << 17)
+#define GEN11_TC1_HOTPLUG (1 << 16)
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \
+ GEN11_TC3_HOTPLUG | \
+ GEN11_TC2_HOTPLUG | \
+ GEN11_TC1_HOTPLUG)
+#define GEN11_TBT4_HOTPLUG (1 << 3)
+#define GEN11_TBT3_HOTPLUG (1 << 2)
+#define GEN11_TBT2_HOTPLUG (1 << 1)
+#define GEN11_TBT1_HOTPLUG (1 << 0)
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \
+ GEN11_TBT3_HOTPLUG | \
+ GEN11_TBT2_HOTPLUG | \
+ GEN11_TBT1_HOTPLUG)
+
+#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
+#define GEN11_HOTPLUG_CTL_ENABLE(tc_port) (8 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+#define GEN11_HOTPLUG_CTL_NO_DETECT(tc_port) (0 << (tc_port) * 4)
+
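Each Type-C port owns a 4-bit nibble in these hotplug-control registers: enable at bit 3 of the nibble, long/short detect at bits 1/0. A standalone sketch, macros copied from above:

#include <stdio.h>

/* Local copies of the per-port hotplug-control helpers above. */
#define GEN11_HOTPLUG_CTL_ENABLE(tc_port)	(8 << (tc_port) * 4)
#define GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port)	(2 << (tc_port) * 4)

int main(void)
{
	/* Enable hotplug detection on TC ports 0..3. */
	unsigned int val = 0;
	for (int port = 0; port < 4; port++)
		val |= GEN11_HOTPLUG_CTL_ENABLE(port);
	printf("enable all: 0x%04x\n", val);	/* 0x8888 */

	/* Bit 1 of port 2's nibble flags a long pulse. */
	printf("long on port 2: 0x%04x\n", GEN11_HOTPLUG_CTL_LONG_DETECT(2)); /* 0x0200 */
	return 0;
}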
#define GEN11_GT_INTR_DW0 _MMIO(0x190018)
#define GEN11_CSME (31)
#define GEN11_GUNIT (28)
@@ -7043,7 +7241,7 @@ enum {
#define GEN11_VECS(x) (31 - (x))
#define GEN11_VCS(x) (x)
-#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + (x * 4))
+#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060)
#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064)
@@ -7052,12 +7250,12 @@ enum {
#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
-#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + (x * 4))
+#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
#define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070)
#define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074)
-#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + (x * 4))
+#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
@@ -7079,8 +7277,8 @@ enum {
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
-#define ILK_DPARB_GATE (1<<22)
-#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DPARB_GATE (1 << 22)
+#define ILK_VSDPFD_FULL (1 << 21)
#define FUSE_STRAP _MMIO(0x42014)
#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
@@ -7130,31 +7328,31 @@ enum {
#define CHICKEN_TRANS_A 0x420c0
#define CHICKEN_TRANS_B 0x420c4
#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
-#define VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */
-#define DDI_TRAINING_OVERRIDE_ENABLE (1<<19)
-#define DDI_TRAINING_OVERRIDE_VALUE (1<<18)
-#define DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */
-#define DDIE_TRAINING_OVERRIDE_VALUE (1<<16) /* CHICKEN_TRANS_A only */
-#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
-#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
+#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
+#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE (1 << 17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE (1 << 16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT (1 << 15)
+#define PSR2_VSC_ENABLE_PROG_HEADER (1 << 12)
#define DISP_ARB_CTL _MMIO(0x45000)
-#define DISP_FBC_MEMORY_WAKE (1<<31)
-#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
-#define DISP_FBC_WM_DIS (1<<15)
+#define DISP_FBC_MEMORY_WAKE (1 << 31)
+#define DISP_TILE_SURFACE_SWIZZLING (1 << 13)
+#define DISP_FBC_WM_DIS (1 << 15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
-#define DISP_DATA_PARTITION_5_6 (1<<6)
-#define DISP_IPC_ENABLE (1<<3)
+#define DISP_DATA_PARTITION_5_6 (1 << 6)
+#define DISP_IPC_ENABLE (1 << 3)
#define DBUF_CTL _MMIO(0x45008)
#define DBUF_CTL_S1 _MMIO(0x45008)
#define DBUF_CTL_S2 _MMIO(0x44FE8)
-#define DBUF_POWER_REQUEST (1<<31)
-#define DBUF_POWER_STATE (1<<30)
+#define DBUF_POWER_REQUEST (1 << 31)
+#define DBUF_POWER_STATE (1 << 30)
#define GEN7_MSG_CTL _MMIO(0x45010)
-#define WAIT_FOR_PCH_RESET_ACK (1<<1)
-#define WAIT_FOR_PCH_FLR_ACK (1<<0)
+#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
+#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
@@ -7179,16 +7377,16 @@ enum {
#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
-#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
+#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14)
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
-#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
-#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10)
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
+#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
-#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
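/*
 * Worked expansion for reference: GEN9_PREEMPT_GPGPU_LEVEL(hi, lo)
 * packs hi into bit 2 and lo into bit 1, so e.g.
 * GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL == GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
 * == (1 << 1).
 */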
@@ -7197,22 +7395,27 @@ enum {
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
-# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
-# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
-#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
-# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13)
-# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
-# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
-# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
+ #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1 << 10) | (1 << 26))
+ #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
+
+#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+ #define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
+ #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
+ #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
+ #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+
+#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
+ #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
#define HIZ_CHICKEN _MMIO(0x7018)
-# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
-# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
+# define CHV_HZ_8X8_MODE_IN_1X (1 << 15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1 << 3)
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
-#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
+#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -7230,7 +7433,7 @@ enum {
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
-#define GEN7_L3AGDIS (1<<19)
+#define GEN7_L3AGDIS (1 << 19)
#define GEN7_L3CNTLREG2 _MMIO(0xB020)
#define GEN7_L3CNTLREG3 _MMIO(0xB024)
@@ -7240,7 +7443,7 @@ enum {
#define GEN11_I2M_WRITE_DISABLE (1 << 28)
#define GEN7_L3SQCREG4 _MMIO(0xb034)
-#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
#define GEN8_L3SQCREG4 _MMIO(0xb118)
#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
@@ -7251,12 +7454,12 @@ enum {
#define HDC_CHICKEN0 _MMIO(0x7300)
#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0)
#define ICL_HDC_MODE _MMIO(0xE5F4)
-#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
-#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
-#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
-#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
-#define HDC_FORCE_NON_COHERENT (1<<4)
-#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
+#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15)
+#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5)
+#define HDC_FORCE_NON_COHERENT (1 << 4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
@@ -7269,13 +7472,21 @@ enum {
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
-#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11)
#define HSW_SCRATCH1 _MMIO(0xb038)
-#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27)
#define BDW_SCRATCH1 _MMIO(0xb11c)
-#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
+
+/* GEN11 chicken */
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN, \
+ _PIPEB_CHICKEN)
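+/*
+ * Illustrative use, assuming the usual read-modify-write pattern in
+ * i915 (not part of this patch):
+ *
+ *	u32 tmp = I915_READ(PIPE_CHICKEN(pipe));
+ *	I915_WRITE(PIPE_CHICKEN(pipe), tmp | PER_PIXEL_ALPHA_BYPASS_EN);
+ */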
/* PCH */
@@ -7320,7 +7531,7 @@ enum {
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
-/* south display engine interrupt: CPT/PPT */
+/* south display engine interrupt: CPT - CNP */
#define SDE_AUDIO_POWER_D_CPT (1 << 31)
#define SDE_AUDIO_POWER_C_CPT (1 << 30)
#define SDE_AUDIO_POWER_B_CPT (1 << 29)
@@ -7368,14 +7579,29 @@ enum {
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
+/* south display engine interrupt: ICP */
+#define SDE_TC4_HOTPLUG_ICP (1 << 27)
+#define SDE_TC3_HOTPLUG_ICP (1 << 26)
+#define SDE_TC2_HOTPLUG_ICP (1 << 25)
+#define SDE_TC1_HOTPLUG_ICP (1 << 24)
+#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
+#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
+#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \
+ SDE_DDIA_HOTPLUG_ICP)
+#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \
+ SDE_TC3_HOTPLUG_ICP | \
+ SDE_TC2_HOTPLUG_ICP | \
+ SDE_TC1_HOTPLUG_ICP)
+
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
#define SDEIIR _MMIO(0xc4008)
#define SDEIER _MMIO(0xc400c)
#define SERR_INT _MMIO(0xc4040)
-#define SERR_INT_POISON (1<<31)
-#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
+#define SERR_INT_POISON (1 << 31)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
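+/*
+ * Example check (illustrative): the transcoder FIFO underrun status is
+ * a single bit per pipe, spaced 3 bits apart:
+ *
+ *	if (I915_READ(SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
+ *		; // underrun on this transcoder
+ */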
/* digital port hotplug */
#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
@@ -7428,6 +7654,134 @@ enum {
#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+/* SHOTPLUG_CTL_DDI reuses the address of the PCH_PORT_HOTPLUG
+ * register. The functionality covered by PCH_PORT_HOTPLUG is
+ * split into SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
+ */
+
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define ICP_DDIB_HPD_ENABLE (1 << 7)
+#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
+#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
+#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4)
+#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
+#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
+#define ICP_DDIA_HPD_ENABLE (1 << 3)
+#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
+#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
+#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
+#define ICP_DDIA_HPD_LONG_DETECT (2 << 0)
+#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0)
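+/*
+ * Sketch of the split read-out on ICP (illustrative, not from this
+ * patch): the HPD status is a 2-bit field, so mask first and compare
+ * against the *_DETECT values:
+ *
+ *	u32 val = I915_READ(SHOTPLUG_CTL_DDI);
+ *	if ((val & ICP_DDIA_HPD_STATUS_MASK) == ICP_DDIA_HPD_LONG_DETECT)
+ *		; // long pulse on DDI A
+ */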
+
+#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
+#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
+#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
+#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT 10
+#define RC_MAX_QP_SHIFT 5
+#define RC_MIN_QP_SHIFT 0
+
+#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
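+/*
+ * Illustrative packing of one DSC RC range parameter (a sketch; the
+ * local names bpg_offset/max_qp/min_qp are assumptions, not from this
+ * patch):
+ *
+ *	u32 rc_range = (bpg_offset << RC_BPG_OFFSET_SHIFT) |
+ *		       (max_qp << RC_MAX_QP_SHIFT) |
+ *		       (min_qp << RC_MIN_QP_SHIFT);
+ *	I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe), rc_range);
+ */
+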
#define PCH_GPIOA _MMIO(0xc5010)
#define PCH_GPIOB _MMIO(0xc5014)
#define PCH_GPIOC _MMIO(0xc5018)
@@ -7444,46 +7798,46 @@ enum {
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
-#define FP_CB_TUNE (0x3<<22)
+#define FP_CB_TUNE (0x3 << 22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST _MMIO(0xc606c)
#define PCH_DREF_CONTROL _MMIO(0xC6200)
#define DREF_CONTROL_MASK 0x7fc3
-#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
-#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
-#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
-#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
-#define DREF_SSC_SOURCE_DISABLE (0<<11)
-#define DREF_SSC_SOURCE_ENABLE (2<<11)
-#define DREF_SSC_SOURCE_MASK (3<<11)
-#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
-#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
-#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
-#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
-#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
-#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
-#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
-#define DREF_SSC4_DOWNSPREAD (0<<6)
-#define DREF_SSC4_CENTERSPREAD (1<<6)
-#define DREF_SSC1_DISABLE (0<<1)
-#define DREF_SSC1_ENABLE (1<<1)
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13)
+#define DREF_SSC_SOURCE_DISABLE (0 << 11)
+#define DREF_SSC_SOURCE_ENABLE (2 << 11)
+#define DREF_SSC_SOURCE_MASK (3 << 11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9)
+#define DREF_NONSPREAD_CK505_ENABLE (1 << 9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9)
+#define DREF_NONSPREAD_SOURCE_MASK (3 << 9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7)
+#define DREF_SSC4_DOWNSPREAD (0 << 6)
+#define DREF_SSC4_CENTERSPREAD (1 << 6)
+#define DREF_SSC1_DISABLE (0 << 1)
+#define DREF_SSC1_ENABLE (1 << 1)
#define DREF_SSC4_DISABLE (0)
#define DREF_SSC4_ENABLE (1)
#define PCH_RAWCLK_FREQ _MMIO(0xc6204)
#define FDL_TP1_TIMER_SHIFT 12
-#define FDL_TP1_TIMER_MASK (3<<12)
+#define FDL_TP1_TIMER_MASK (3 << 12)
#define FDL_TP2_TIMER_SHIFT 10
-#define FDL_TP2_TIMER_MASK (3<<10)
+#define FDL_TP2_TIMER_MASK (3 << 10)
#define RAWCLK_FREQ_MASK 0x3ff
#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
#define CNP_RAWCLK_DIV(div) ((div) << 16)
@@ -7520,7 +7874,7 @@ enum {
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
#define _PCH_TRANS_VSYNC_A 0xe0014
-#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
@@ -7600,15 +7954,28 @@ enum {
#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define _HSW_VIDEO_DIP_GCP_B 0x61210
+/* Icelake PPS_DATA and _ECC DIP Registers.
+ * These are available for transcoders B, C and eDP.
+ * The _A variant is added only so the _MMIO_TRANS2
+ * definition can be reused; it offsets to the right location.
+ */
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
+#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
+
#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
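+/*
+ * Illustrative write loop (not part of this patch; pps_size and data
+ * are assumed local names): the PPS DIP payload goes out one dword at
+ * a time through the per-transcoder macro:
+ *
+ *	for (i = 0; i < pps_size / 4; i++)
+ *		I915_WRITE(ICL_VIDEO_DIP_PPS_DATA(trans, i), data[i]);
+ */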
#define _HSW_STEREO_3D_CTL_A 0x70020
-#define S3D_ENABLE (1<<31)
+#define S3D_ENABLE (1 << 31)
#define _HSW_STEREO_3D_CTL_B 0x71020
#define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
@@ -7651,156 +8018,156 @@ enum {
#define _PCH_TRANSBCONF 0xf1008
#define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
#define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
-#define TRANS_DISABLE (0<<31)
-#define TRANS_ENABLE (1<<31)
-#define TRANS_STATE_MASK (1<<30)
-#define TRANS_STATE_DISABLE (0<<30)
-#define TRANS_STATE_ENABLE (1<<30)
-#define TRANS_FSYNC_DELAY_HB1 (0<<27)
-#define TRANS_FSYNC_DELAY_HB2 (1<<27)
-#define TRANS_FSYNC_DELAY_HB3 (2<<27)
-#define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_INTERLACE_MASK (7<<21)
-#define TRANS_PROGRESSIVE (0<<21)
-#define TRANS_INTERLACED (3<<21)
-#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
-#define TRANS_8BPC (0<<5)
-#define TRANS_10BPC (1<<5)
-#define TRANS_6BPC (2<<5)
-#define TRANS_12BPC (3<<5)
+#define TRANS_DISABLE (0 << 31)
+#define TRANS_ENABLE (1 << 31)
+#define TRANS_STATE_MASK (1 << 30)
+#define TRANS_STATE_DISABLE (0 << 30)
+#define TRANS_STATE_ENABLE (1 << 30)
+#define TRANS_FSYNC_DELAY_HB1 (0 << 27)
+#define TRANS_FSYNC_DELAY_HB2 (1 << 27)
+#define TRANS_FSYNC_DELAY_HB3 (2 << 27)
+#define TRANS_FSYNC_DELAY_HB4 (3 << 27)
+#define TRANS_INTERLACE_MASK (7 << 21)
+#define TRANS_PROGRESSIVE (0 << 21)
+#define TRANS_INTERLACED (3 << 21)
+#define TRANS_LEGACY_INTERLACED_ILK (2 << 21)
+#define TRANS_8BPC (0 << 5)
+#define TRANS_10BPC (1 << 5)
+#define TRANS_6BPC (2 << 5)
+#define TRANS_12BPC (3 << 5)
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
-#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
+#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1 << 10)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1 << 4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
-#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
-#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25)
#define SOUTH_CHICKEN1 _MMIO(0xc2000)
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
-#define SPT_PWM_GRANULARITY (1<<0)
+#define SPT_PWM_GRANULARITY (1 << 0)
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
-#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
-#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
-#define LPT_PWM_GRANULARITY (1<<5)
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1 << 12)
+#define LPT_PWM_GRANULARITY (1 << 5)
+#define DPLS_EDP_PPS_FIX_DIS (1 << 0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
-#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
-#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1 << 0)
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
-#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
-#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
-#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
-#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define CNP_PWM_CGE_GATING_DISABLE (1<<13)
-#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
+#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
#define _FDI_TXB_CTL 0x61100
#define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
-#define FDI_TX_DISABLE (0<<31)
-#define FDI_TX_ENABLE (1<<31)
-#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
-#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
-#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
-#define FDI_LINK_TRAIN_NONE (3<<28)
-#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+#define FDI_TX_DISABLE (0 << 31)
+#define FDI_TX_ENABLE (1 << 31)
+#define FDI_LINK_TRAIN_PATTERN_1 (0 << 28)
+#define FDI_LINK_TRAIN_PATTERN_2 (1 << 28)
+#define FDI_LINK_TRAIN_PATTERN_IDLE (2 << 28)
+#define FDI_LINK_TRAIN_NONE (3 << 28)
+#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2 << 25)
+#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3 << 25)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2 << 22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3 << 22)
/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
   SNB has different settings. */
/* SNB A-stepping */
-#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
-#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
-#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
-#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
/* SNB B-stepping */
-#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
-#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
-#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
-#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
-#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0 << 22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a << 22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39 << 22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38 << 22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f << 22)
#define FDI_DP_PORT_WIDTH_SHIFT 19
#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
-#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
+#define FDI_TX_ENHANCE_FRAME_ENABLE (1 << 18)
/* Ironlake: hardwired to 1 */
-#define FDI_TX_PLL_ENABLE (1<<14)
+#define FDI_TX_PLL_ENABLE (1 << 14)
/* Ivybridge has different bits for lolz */
-#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
-#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
-#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
-#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_1_IVB (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_IVB (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2 << 8)
+#define FDI_LINK_TRAIN_NONE_IVB (3 << 8)
/* both Tx and Rx */
-#define FDI_COMPOSITE_SYNC (1<<11)
-#define FDI_LINK_TRAIN_AUTO (1<<10)
-#define FDI_SCRAMBLING_ENABLE (0<<7)
-#define FDI_SCRAMBLING_DISABLE (1<<7)
+#define FDI_COMPOSITE_SYNC (1 << 11)
+#define FDI_LINK_TRAIN_AUTO (1 << 10)
+#define FDI_SCRAMBLING_ENABLE (0 << 7)
+#define FDI_SCRAMBLING_DISABLE (1 << 7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
#define _FDI_RXA_CTL 0xf000c
#define _FDI_RXB_CTL 0xf100c
#define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
-#define FDI_RX_ENABLE (1<<31)
+#define FDI_RX_ENABLE (1 << 31)
/* train, dp width same as FDI_TX */
-#define FDI_FS_ERRC_ENABLE (1<<27)
-#define FDI_FE_ERRC_ENABLE (1<<26)
-#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
-#define FDI_8BPC (0<<16)
-#define FDI_10BPC (1<<16)
-#define FDI_6BPC (2<<16)
-#define FDI_12BPC (3<<16)
-#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
-#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
-#define FDI_RX_PLL_ENABLE (1<<13)
-#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
-#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
-#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
-#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
-#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
-#define FDI_PCDCLK (1<<4)
+#define FDI_FS_ERRC_ENABLE (1 << 27)
+#define FDI_FE_ERRC_ENABLE (1 << 26)
+#define FDI_RX_POLARITY_REVERSED_LPT (1 << 16)
+#define FDI_8BPC (0 << 16)
+#define FDI_10BPC (1 << 16)
+#define FDI_6BPC (2 << 16)
+#define FDI_12BPC (3 << 16)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1 << 15)
+#define FDI_DMI_LINK_REVERSE_MASK (1 << 14)
+#define FDI_RX_PLL_ENABLE (1 << 13)
+#define FDI_FS_ERR_CORRECT_ENABLE (1 << 11)
+#define FDI_FE_ERR_CORRECT_ENABLE (1 << 10)
+#define FDI_FS_ERR_REPORT_ENABLE (1 << 9)
+#define FDI_FE_ERR_REPORT_ENABLE (1 << 8)
+#define FDI_RX_ENHANCE_FRAME_ENABLE (1 << 6)
+#define FDI_PCDCLK (1 << 4)
/* CPT */
-#define FDI_AUTO_TRAINING (1<<10)
-#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
-#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
-#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
-#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
-#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+#define FDI_AUTO_TRAINING (1 << 10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0 << 8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1 << 8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2 << 8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3 << 8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3 << 8)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
-#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
-#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
-#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
-#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_PWRDN_LANE1_MASK (3 << 26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x) << 26)
+#define FDI_RX_PWRDN_LANE0_MASK (3 << 24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x) << 24)
+#define FDI_RX_TP1_TO_TP2_48 (2 << 20)
+#define FDI_RX_TP1_TO_TP2_64 (3 << 20)
+#define FDI_RX_FDI_DELAY_90 (0x90 << 0)
#define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define _FDI_RXA_TUSIZE1 0xf0030
@@ -7811,17 +8178,17 @@ enum {
#define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
-#define FDI_RX_INTER_LANE_ALIGN (1<<10)
-#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
-#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
-#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
-#define FDI_RX_FS_CODE_ERR (1<<6)
-#define FDI_RX_FE_CODE_ERR (1<<5)
-#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
-#define FDI_RX_HDCP_LINK_FAIL (1<<3)
-#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
-#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
-#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
+#define FDI_RX_INTER_LANE_ALIGN (1 << 10)
+#define FDI_RX_SYMBOL_LOCK (1 << 9) /* train 2 */
+#define FDI_RX_BIT_LOCK (1 << 8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL (1 << 7)
+#define FDI_RX_FS_CODE_ERR (1 << 6)
+#define FDI_RX_FE_CODE_ERR (1 << 5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1 << 4)
+#define FDI_RX_HDCP_LINK_FAIL (1 << 3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW (1 << 2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW (1 << 1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1 << 0)
#define _FDI_RXA_IIR 0xf0014
#define _FDI_RXA_IMR 0xf0018
@@ -7867,71 +8234,58 @@ enum {
#define PCH_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
/* CPT */
-#define PORT_TRANS_A_SEL_CPT 0
-#define PORT_TRANS_B_SEL_CPT (1<<29)
-#define PORT_TRANS_C_SEL_CPT (2<<29)
-#define PORT_TRANS_SEL_MASK (3<<29)
-#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
-#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
-#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
-#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
-#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
-
#define _TRANS_DP_CTL_A 0xe0300
#define _TRANS_DP_CTL_B 0xe1300
#define _TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
-#define TRANS_DP_OUTPUT_ENABLE (1<<31)
-#define TRANS_DP_PORT_SEL_B (0<<29)
-#define TRANS_DP_PORT_SEL_C (1<<29)
-#define TRANS_DP_PORT_SEL_D (2<<29)
-#define TRANS_DP_PORT_SEL_NONE (3<<29)
-#define TRANS_DP_PORT_SEL_MASK (3<<29)
-#define TRANS_DP_PIPE_TO_PORT(val) ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_ENH_FRAMING (1<<18)
-#define TRANS_DP_8BPC (0<<9)
-#define TRANS_DP_10BPC (1<<9)
-#define TRANS_DP_6BPC (2<<9)
-#define TRANS_DP_12BPC (3<<9)
-#define TRANS_DP_BPC_MASK (3<<9)
-#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_OUTPUT_ENABLE (1 << 31)
+#define TRANS_DP_PORT_SEL_MASK (3 << 29)
+#define TRANS_DP_PORT_SEL_NONE (3 << 29)
+#define TRANS_DP_PORT_SEL(port) (((port) - PORT_B) << 29)
+#define TRANS_DP_AUDIO_ONLY (1 << 26)
+#define TRANS_DP_ENH_FRAMING (1 << 18)
+#define TRANS_DP_8BPC (0 << 9)
+#define TRANS_DP_10BPC (1 << 9)
+#define TRANS_DP_6BPC (2 << 9)
+#define TRANS_DP_12BPC (3 << 9)
+#define TRANS_DP_BPC_MASK (3 << 9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1 << 4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
-#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1 << 3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
-#define TRANS_DP_SYNC_MASK (3<<3)
+#define TRANS_DP_SYNC_MASK (3 << 3)
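+/*
+ * Worked expansion for reference: the parameterized select replaces
+ * the old per-port defines, e.g. TRANS_DP_PORT_SEL(PORT_D) ==
+ * (2 << 29), which matches the removed TRANS_DP_PORT_SEL_D.
+ */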
/* SNB eDP training params */
/* SNB A-stepping */
-#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
-#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
-#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22)
/* SNB B-stepping */
-#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
-#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
-#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
-#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22)
/* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22)
/* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22)
-#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22)
#define VLV_PMWGICZ _MMIO(0x1300a4)
@@ -7978,7 +8332,7 @@ enum {
#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
#define FORCEWAKE_MT_ACK _MMIO(0x130040)
#define ECOBUS _MMIO(0xa180)
-#define FORCEWAKE_MT_ENABLE (1<<5)
+#define FORCEWAKE_MT_ENABLE (1 << 5)
#define VLV_SPAREG2H _MMIO(0xA194)
#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0)
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
@@ -7987,13 +8341,13 @@ enum {
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
-#define GT_FIFO_SBDROPERR (1<<6)
-#define GT_FIFO_BLOBDROPERR (1<<5)
-#define GT_FIFO_SB_READ_ABORTERR (1<<4)
-#define GT_FIFO_DROPERR (1<<3)
-#define GT_FIFO_OVFERR (1<<2)
-#define GT_FIFO_IAWRERR (1<<1)
-#define GT_FIFO_IARDERR (1<<0)
+#define GT_FIFO_SBDROPERR (1 << 6)
+#define GT_FIFO_BLOBDROPERR (1 << 5)
+#define GT_FIFO_SB_READ_ABORTERR (1 << 4)
+#define GT_FIFO_DROPERR (1 << 3)
+#define GT_FIFO_OVFERR (1 << 2)
+#define GT_FIFO_IAWRERR (1 << 1)
+#define GT_FIFO_IARDERR (1 << 0)
#define GTFIFOCTL _MMIO(0x120008)
#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
@@ -8027,37 +8381,37 @@ enum {
# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
#define GEN7_UCGCTL4 _MMIO(0x940c)
-#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
-#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
#define GEN6_RSTCTL _MMIO(0x9420)
#define GEN8_UCGCTL6 _MMIO(0x9430)
-#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
-#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
+#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24)
+#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
#define GEN6_GFXPAUSE _MMIO(0xA000)
#define GEN6_RPNSWREQ _MMIO(0xA008)
-#define GEN6_TURBO_DISABLE (1<<31)
-#define GEN6_FREQUENCY(x) ((x)<<25)
-#define HSW_FREQUENCY(x) ((x)<<24)
-#define GEN9_FREQUENCY(x) ((x)<<23)
-#define GEN6_OFFSET(x) ((x)<<19)
-#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_TURBO_DISABLE (1 << 31)
+#define GEN6_FREQUENCY(x) ((x) << 25)
+#define HSW_FREQUENCY(x) ((x) << 24)
+#define GEN9_FREQUENCY(x) ((x) << 23)
+#define GEN6_OFFSET(x) ((x) << 19)
+#define GEN6_AGGRESSIVE_TURBO (0 << 15)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
#define GEN6_RC_CONTROL _MMIO(0xA090)
-#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
-#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
-#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
-#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
-#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
-#define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24)
-#define GEN7_RC_CTL_TO_MODE (1<<28)
-#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
-#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17)
+#define GEN6_RC_CTL_RC6_ENABLE (1 << 18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1 << 20)
+#define GEN6_RC_CTL_RC7_ENABLE (1 << 22)
+#define VLV_RC_CTL_CTX_RST_PARALLEL (1 << 24)
+#define GEN7_RC_CTL_TO_MODE (1 << 28)
+#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27)
+#define GEN6_RC_CTL_HW_ENABLE (1 << 31)
#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010)
#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014)
#define GEN6_RPSTAT1 _MMIO(0xA01C)
@@ -8068,19 +8422,19 @@ enum {
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL _MMIO(0xA024)
-#define GEN6_RP_MEDIA_TURBO (1<<11)
-#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
-#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
-#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
-#define GEN6_RP_MEDIA_HW_MODE (1<<9)
-#define GEN6_RP_MEDIA_SW_MODE (0<<9)
-#define GEN6_RP_MEDIA_IS_GFX (1<<8)
-#define GEN6_RP_ENABLE (1<<7)
-#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
-#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
-#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
-#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
-#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
+#define GEN6_RP_MEDIA_TURBO (1 << 11)
+#define GEN6_RP_MEDIA_MODE_MASK (3 << 9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2 << 9)
+#define GEN6_RP_MEDIA_HW_MODE (1 << 9)
+#define GEN6_RP_MEDIA_SW_MODE (0 << 9)
+#define GEN6_RP_MEDIA_IS_GFX (1 << 8)
+#define GEN6_RP_ENABLE (1 << 7)
+#define GEN6_RP_UP_IDLE_MIN (0x1 << 3)
+#define GEN6_RP_UP_BUSY_AVG (0x2 << 3)
+#define GEN6_RP_UP_BUSY_CONT (0x4 << 3)
+#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0)
+#define