-rw-r--r-- .clang-format | 2
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml | 12
-rw-r--r-- Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml | 69
-rw-r--r-- Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml | 99
-rw-r--r-- Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.yaml | 2
-rw-r--r-- Documentation/gpu/drm-kms-helpers.rst | 12
-rw-r--r-- Documentation/gpu/todo.rst | 37
-rw-r--r-- Documentation/gpu/xe/index.rst | 1
-rw-r--r-- Documentation/gpu/xe/xe_exec_queue.rst | 20
-rw-r--r-- Documentation/userspace-api/dma-buf-heaps.rst | 59
-rw-r--r-- MAINTAINERS | 13
-rw-r--r-- drivers/accel/amdxdna/Makefile | 1
-rw-r--r-- drivers/accel/amdxdna/TODO | 1
-rw-r--r-- drivers/accel/amdxdna/aie2_ctx.c | 167
-rw-r--r-- drivers/accel/amdxdna/aie2_error.c | 95
-rw-r--r-- drivers/accel/amdxdna/aie2_message.c | 59
-rw-r--r-- drivers/accel/amdxdna/aie2_msg_priv.h | 18
-rw-r--r-- drivers/accel/amdxdna/aie2_pci.c | 89
-rw-r--r-- drivers/accel/amdxdna/aie2_pci.h | 11
-rw-r--r-- drivers/accel/amdxdna/aie2_smu.c | 28
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ctx.c | 97
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ctx.h | 16
-rw-r--r-- drivers/accel/amdxdna/amdxdna_error.h | 59
-rw-r--r-- drivers/accel/amdxdna/amdxdna_gem.c | 50
-rw-r--r-- drivers/accel/amdxdna/amdxdna_gem.h | 6
-rw-r--r-- drivers/accel/amdxdna/amdxdna_mailbox.c | 13
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pci_drv.c | 60
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pci_drv.h | 3
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pm.c | 94
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pm.h | 18
-rw-r--r-- drivers/accel/amdxdna/npu1_regs.c | 1
-rw-r--r-- drivers/accel/amdxdna/npu4_regs.c | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_debugfs.c | 38
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.c | 6
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.c | 221
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.h | 14
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.c | 116
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.h | 10
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.c | 59
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.h | 10
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs.c | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_job.c | 165
-rw-r--r-- drivers/accel/ivpu/ivpu_job.h | 49
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu_context.c | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.c | 11
-rw-r--r-- drivers/accel/ivpu/vpu_jsm_api.h | 653
-rw-r--r-- drivers/accel/qaic/qaic_control.c | 9
-rw-r--r-- drivers/accel/qaic/qaic_data.c | 96
-rw-r--r-- drivers/accel/qaic/qaic_ras.c | 6
-rw-r--r-- drivers/accel/qaic/sahara.c | 159
-rw-r--r-- drivers/dma-buf/heaps/Kconfig | 10
-rw-r--r-- drivers/dma-buf/heaps/cma_heap.c | 47
-rw-r--r-- drivers/gpu/drm/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_crtc.c | 31
-rw-r--r-- drivers/gpu/drm/arm/malidp_planes.c | 2
-rw-r--r-- drivers/gpu/drm/armada/armada_plane.c | 7
-rw-r--r-- drivers/gpu/drm/ast/Makefile | 3
-rw-r--r-- drivers/gpu/drm/ast/ast_2000.c | 108
-rw-r--r-- drivers/gpu/drm/ast/ast_2100.c | 92
-rw-r--r-- drivers/gpu/drm/ast/ast_2200.c | 92
-rw-r--r-- drivers/gpu/drm/ast/ast_2300.c | 135
-rw-r--r-- drivers/gpu/drm/ast/ast_2400.c | 100
-rw-r--r-- drivers/gpu/drm/ast/ast_2500.c | 106
-rw-r--r-- drivers/gpu/drm/ast/ast_2600.c | 72
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 69
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 100
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 268
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 46
-rw-r--r-- drivers/gpu/drm/ast/ast_tables.h | 60
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 21
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 14
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 3
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 3
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 52
-rw-r--r-- drivers/gpu/drm/bridge/imx/Kconfig | 11
-rw-r--r-- drivers/gpu/drm/bridge/imx/Makefile | 1
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c | 158
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c | 65
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c | 7
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/Kconfig | 8
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c | 5
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c | 224
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h | 14
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 18
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 112
-rw-r--r-- drivers/gpu/drm/clients/drm_fbdev_client.c | 14
-rw-r--r-- drivers/gpu/drm/clients/drm_log.c | 4
-rw-r--r-- drivers/gpu/drm/display/drm_bridge_connector.c | 119
-rw-r--r-- drivers/gpu/drm/display/drm_dp_helper.c | 76
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 48
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 8
-rw-r--r-- drivers/gpu/drm/drm_bridge.c | 58
-rw-r--r-- drivers/gpu/drm/drm_buddy.c | 394
-rw-r--r-- drivers/gpu/drm/drm_client_event.c | 16
-rw-r--r-- drivers/gpu/drm/drm_client_modeset.c | 44
-rw-r--r-- drivers/gpu/drm/drm_dumb_buffers.c | 170
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 30
-rw-r--r-- drivers/gpu/drm/drm_format_helper.c | 91
-rw-r--r-- drivers/gpu/drm/drm_framebuffer.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 16
-rw-r--r-- drivers/gpu/drm/drm_gem_dma_helper.c | 9
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c | 114
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c | 8
-rw-r--r-- drivers/gpu/drm/drm_gpusvm.c | 6
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c | 2
-rw-r--r-- drivers/gpu/drm/drm_modeset_helper.c | 6
-rw-r--r-- drivers/gpu/drm/drm_vblank.c | 172
-rw-r--r-- drivers/gpu/drm/drm_vblank_helper.c | 176
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_plane.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/fbdev.c | 43
-rw-r--r-- drivers/gpu/drm/gud/gud_connector.c | 8
-rw-r--r-- drivers/gpu/drm/gud/gud_pipe.c | 12
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_modeset.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.c | 6
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-ed.c | 8
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fg.c | 4
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fu.c | 10
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fu.h | 4
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-lb.c | 28
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-plane.c | 2
-rw-r--r-- drivers/gpu/drm/imx/dcss/dcss-plane.c | 4
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/imx-drm-core.c | 29
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/imx-tve.c | 17
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c | 3
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/parallel-display.c | 4
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 13
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-ipu.c | 4
-rw-r--r-- drivers/gpu/drm/kmb/kmb_plane.c | 3
-rw-r--r-- drivers/gpu/drm/logicvc/logicvc_layer.c | 4
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_gem.c | 31
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_plane.c | 2
-rw-r--r-- drivers/gpu/drm/mcde/mcde_clk_div.c | 13
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_plane.c | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 7
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 27
-rw-r--r-- drivers/gpu/drm/nouveau/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_platform.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c | 320
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h | 24
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c | 185
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h | 18
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_encoder.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 15
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_plane.c | 2
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 15
-rw-r--r-- drivers/gpu/drm/panel/Makefile | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-edp.c | 9
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 1102
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c | 225
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 35
-rw-r--r-- drivers/gpu/drm/panel/panel-visionox-rm69299.c | 71
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_devfreq.c | 4
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_device.c | 68
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_device.h | 24
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c | 242
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_dump.c | 8
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem.c | 8
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 4
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gpu.c | 64
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.c | 336
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.h | 38
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 114
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.h | 3
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 26
-rw-r--r-- drivers/gpu/drm/panthor/panthor_devfreq.c | 3
-rw-r--r-- drivers/gpu/drm/panthor/panthor_device.c | 2
-rw-r--r-- drivers/gpu/drm/panthor/panthor_device.h | 14
-rw-r--r-- drivers/gpu/drm/panthor/panthor_drv.c | 8
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gpu.c | 25
-rw-r--r-- drivers/gpu/drm/panthor/panthor_regs.h | 4
-rw-r--r-- drivers/gpu/drm/panthor/panthor_sched.c | 40
-rw-r--r-- drivers/gpu/drm/panthor/panthor_sched.h | 3
-rw-r--r-- drivers/gpu/drm/pl111/pl111_display.c | 13
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 29
-rw-r--r-- drivers/gpu/drm/qxl/qxl_gem.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c | 7
-rw-r--r-- drivers/gpu/drm/renesas/rz-du/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 12
-rw-r--r-- drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 20
-rw-r--r-- drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c | 77
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 12
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 6
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 83
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 1
-rw-r--r-- drivers/gpu/drm/scheduler/tests/sched_tests.h | 3
-rw-r--r-- drivers/gpu/drm/sitronix/st7571-i2c.c | 1
-rw-r--r-- drivers/gpu/drm/solomon/ssd130x.c | 86
-rw-r--r-- drivers/gpu/drm/stm/dw_mipi_dsi-stm.c | 14
-rw-r--r-- drivers/gpu/drm/stm/lvds.c | 12
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c | 12
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c | 18
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_layer.c | 3
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_layer.c | 3
-rw-r--r-- drivers/gpu/drm/sysfb/drm_sysfb_helper.h | 34
-rw-r--r-- drivers/gpu/drm/sysfb/drm_sysfb_modeset.c | 153
-rw-r--r-- drivers/gpu/drm/sysfb/simpledrm.c | 3
-rw-r--r-- drivers/gpu/drm/sysfb/vesadrm.c | 3
-rw-r--r-- drivers/gpu/drm/tegra/dc.c | 2
-rw-r--r-- drivers/gpu/drm/tegra/gem.c | 8
-rw-r--r-- drivers/gpu/drm/tegra/hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/tegra/sor.c | 4
-rw-r--r-- drivers/gpu/drm/tests/.kunitconfig | 2
-rw-r--r-- drivers/gpu/drm/tests/drm_buddy_test.c | 105
-rw-r--r-- drivers/gpu/drm/tidss/tidss_crtc.c | 33
-rw-r--r-- drivers/gpu/drm/tidss/tidss_dispc.c | 10
-rw-r--r-- drivers/gpu/drm/tidss/tidss_drv.c | 16
-rw-r--r-- drivers/gpu/drm/tidss/tidss_kms.c | 4
-rw-r--r-- drivers/gpu/drm/tidss/tidss_plane.c | 8
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 9
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_plane.c | 3
-rw-r--r-- drivers/gpu/drm/tiny/bochs.c | 10
-rw-r--r-- drivers/gpu/drm/tiny/cirrus-qemu.c | 11
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_bo_test.c | 12
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c | 60
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 15
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_internal.h | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_resource.c | 3
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_mode.c | 8
-rw-r--r-- drivers/gpu/drm/vc4/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 137
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.h | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 6
-rw-r--r-- drivers/gpu/drm/vgem/vgem_fence.c | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_display.c | 36
-rw-r--r-- drivers/gpu/drm/vkms/vkms_crtc.c | 87
-rw-r--r-- drivers/gpu/drm/vkms/vkms_drv.h | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 21
-rw-r--r-- drivers/gpu/drm/xe/Kconfig.debug | 16
-rw-r--r-- drivers/gpu/drm/xe/Makefile | 6
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_actions_abi.h | 8
-rw-r--r-- drivers/gpu/drm/xe/display/intel_fbdev_fb.c | 4
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display.c | 11
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display.h | 4
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display_wa.c | 3
-rw-r--r-- drivers/gpu/drm/xe/display/xe_plane_initial.c | 4
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_engine_regs.h | 4
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_gt_regs.h | 7
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_i2c_regs.h | 3
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_irq_regs.h | 8
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_dma_buf.c | 17
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_pci.c | 6
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_rtp_test.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.c | 88
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.h | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_bo_evict.c | 9
-rw-r--r-- drivers/gpu/drm/xe/xe_configfs.c | 282
-rw-r--r-- drivers/gpu/drm/xe/xe_configfs.h | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_debugfs.c | 15
-rw-r--r-- drivers/gpu/drm/xe/xe_device.c | 78
-rw-r--r-- drivers/gpu/drm/xe/xe_device_sysfs.c | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_device_types.h | 15
-rw-r--r-- drivers/gpu/drm/xe/xe_device_wa_oob.rules | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_dma_buf.c | 41
-rw-r--r-- drivers/gpu/drm/xe/xe_eu_stall.c | 28
-rw-r--r-- drivers/gpu/drm/xe/xe_exec.c | 15
-rw-r--r-- drivers/gpu/drm/xe/xe_exec_queue.c | 118
-rw-r--r-- drivers/gpu/drm/xe/xe_exec_queue.h | 5
-rw-r--r-- drivers/gpu/drm/xe/xe_exec_queue_types.h | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_execlist.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_ggtt.c | 70
-rw-r--r-- drivers/gpu/drm/xe/xe_gpu_scheduler.c | 27
-rw-r--r-- drivers/gpu/drm/xe/xe_gpu_scheduler.h | 29
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.c | 53
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.h | 19
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_clock.c | 19
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_debugfs.c | 159
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_debugfs.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_freq.c | 7
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_mcr.c | 67
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 98
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c | 436
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_printk.h | 7
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 470
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 34
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_topology.c | 17
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_topology.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_types.h | 25
-rw-r--r-- drivers/gpu/drm/xe/xe_guc.c | 269
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_ads.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_capture.c | 29
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_ct.c | 371
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_ct.h | 13
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_exec_queue_types.h | 15
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_pc.c | 47
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_relay.c | 17
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_relay_types.h | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_submit.c | 604
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_submit.h | 7
-rw-r--r-- drivers/gpu/drm/xe/xe_huc.c | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_hw_engine.c | 62
-rw-r--r-- drivers/gpu/drm/xe/xe_hwmon.c | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_i2c.c | 28
-rw-r--r-- drivers/gpu/drm/xe/xe_i2c.h | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_irq.c | 138
-rw-r--r-- drivers/gpu/drm/xe/xe_lmtt.c | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_lrc.c | 20
-rw-r--r-- drivers/gpu/drm/xe/xe_lrc.h | 16
-rw-r--r-- drivers/gpu/drm/xe/xe_map.h | 18
-rw-r--r-- drivers/gpu/drm/xe/xe_memirq.c | 57
-rw-r--r-- drivers/gpu/drm/xe/xe_memirq.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_migrate.c | 107
-rw-r--r-- drivers/gpu/drm/xe/xe_mocs.c | 16
-rw-r--r-- drivers/gpu/drm/xe/xe_mocs.h | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_oa.c | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_pat.c | 136
-rw-r--r-- drivers/gpu/drm/xe/xe_pat.h | 7
-rw-r--r-- drivers/gpu/drm/xe/xe_pci.c | 264
-rw-r--r-- drivers/gpu/drm/xe/xe_pci_sriov.c | 53
-rw-r--r-- drivers/gpu/drm/xe/xe_pci_types.h | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_platform_types.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_pm.c | 66
-rw-r--r-- drivers/gpu/drm/xe/xe_pm.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_pmu.c | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_preempt_fence.c | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_psmi.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_pt.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_query.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_reg_whitelist.c | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_ring_ops.c | 23
-rw-r--r-- drivers/gpu/drm/xe/xe_rtp.c | 31
-rw-r--r-- drivers/gpu/drm/xe/xe_rtp.h | 30
-rw-r--r-- drivers/gpu/drm/xe/xe_rtp_types.h | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_sched_job_types.h | 9
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf.c | 70
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf.h | 18
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf_control.c | 151
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf_control.h | 17
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c | 264
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h | 18
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf_helpers.h | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf_provision.c | 154
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf_provision.h | 31
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h | 36
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_pf_types.h | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_printk.h | 12
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_vf.c | 243
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_vf.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_vf_ccs.c | 28
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_vf_ccs.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_sriov_vf_types.h | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_tile.c | 14
-rw-r--r-- drivers/gpu/drm/xe/xe_tile_debugfs.c | 19
-rw-r--r-- drivers/gpu/drm/xe/xe_tile_debugfs.h | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c | 253
-rw-r--r-- drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h | 15
-rw-r--r-- drivers/gpu/drm/xe/xe_tile_sriov_printk.h | 33
-rw-r--r-- drivers/gpu/drm/xe/xe_tile_sriov_vf.c | 112
-rw-r--r-- drivers/gpu/drm/xe/xe_tile_sriov_vf.h | 9
-rw-r--r-- drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h | 23
-rw-r--r-- drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_ttm_sys_mgr.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_ttm_vram_mgr.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_tuning.c | 26
-rw-r--r-- drivers/gpu/drm/xe/xe_tuning.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_userptr.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_vm.c | 31
-rw-r--r-- drivers/gpu/drm/xe/xe_vram.c | 12
-rw-r--r-- drivers/gpu/drm/xe/xe_wa.c | 27
-rw-r--r-- drivers/gpu/drm/xe/xe_wa.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_wa_oob.rules | 17
-rw-r--r-- drivers/gpu/drm/xlnx/zynqmp_kms.c | 7
-rw-r--r-- drivers/gpu/host1x/bus.c | 12
-rw-r--r-- drivers/gpu/host1x/dev.c | 11
-rw-r--r-- drivers/gpu/host1x/hw/channel_hw.c | 106
-rw-r--r-- drivers/gpu/host1x/syncpt.c | 4
-rw-r--r-- drivers/video/fbdev/Kconfig | 8
-rw-r--r-- drivers/video/fbdev/core/Kconfig | 2
-rw-r--r-- drivers/video/fbdev/core/bitblit.c | 122
-rw-r--r-- drivers/video/fbdev/core/fbcon.c | 459
-rw-r--r-- drivers/video/fbdev/core/fbcon.h | 17
-rw-r--r-- drivers/video/fbdev/core/fbcon_ccw.c | 151
-rw-r--r-- drivers/video/fbdev/core/fbcon_cw.c | 151
-rw-r--r-- drivers/video/fbdev/core/fbcon_rotate.c | 47
-rw-r--r-- drivers/video/fbdev/core/fbcon_rotate.h | 18
-rw-r--r-- drivers/video/fbdev/core/fbcon_ud.c | 167
-rw-r--r-- drivers/video/fbdev/core/softcursor.c | 18
-rw-r--r-- drivers/video/fbdev/core/tileblit.c | 32
-rw-r--r-- drivers/video/fbdev/simplefb.c | 6
-rw-r--r-- include/drm/bridge/dw_hdmi.h | 11
-rw-r--r-- include/drm/bridge/dw_hdmi_qp.h | 2
-rw-r--r-- include/drm/display/drm_dp.h | 3
-rw-r--r-- include/drm/display/drm_dp_helper.h | 8
-rw-r--r-- include/drm/drm_atomic.h | 152
-rw-r--r-- include/drm/drm_bridge.h | 61
-rw-r--r-- include/drm/drm_buddy.h | 11
-rw-r--r-- include/drm/drm_client.h | 15
-rw-r--r-- include/drm/drm_client_event.h | 8
-rw-r--r-- include/drm/drm_crtc.h | 2
-rw-r--r-- include/drm/drm_dumb_buffers.h | 14
-rw-r--r-- include/drm/drm_format_helper.h | 4
-rw-r--r-- include/drm/drm_gem_shmem_helper.h | 2
-rw-r--r-- include/drm/drm_gpusvm.h | 4
-rw-r--r-- include/drm/drm_modeset_helper_vtables.h | 12
-rw-r--r-- include/drm/drm_vblank.h | 32
-rw-r--r-- include/drm/drm_vblank_helper.h | 56
-rw-r--r-- include/drm/gpu_scheduler.h | 2
-rw-r--r-- include/drm/intel/pciids.h | 16
-rw-r--r-- include/drm/ttm/ttm_bo.h | 2
-rw-r--r-- include/linux/dma-buf/heaps/cma.h | 16
-rw-r--r-- include/sound/asoundef.h | 9
-rw-r--r-- include/uapi/drm/amdxdna_accel.h | 13
-rw-r--r-- include/uapi/drm/drm_mode.h | 50
-rw-r--r-- include/uapi/drm/ivpu_accel.h | 11
-rw-r--r-- include/uapi/drm/panfrost_drm.h | 50
-rw-r--r-- include/uapi/drm/xe_drm.h | 6
-rw-r--r-- kernel/dma/contiguous.c | 11
437 files changed, 13823 insertions, 5926 deletions
diff --git a/.clang-format b/.clang-format
index f371a13b4d19..ecb5035a3d9d 100644
--- a/.clang-format
+++ b/.clang-format
@@ -167,7 +167,7 @@ ForEachMacros:
- 'drm_connector_for_each_possible_encoder'
- 'drm_exec_for_each_locked_object'
- 'drm_exec_for_each_locked_object_reverse'
- - 'drm_for_each_bridge_in_chain'
+ - 'drm_for_each_bridge_in_chain_scoped'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_crtc_reverse'
diff --git a/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml b/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml
index 05442d437755..6211ab8bbb0e 100644
--- a/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml
@@ -49,6 +49,10 @@ properties:
$ref: /schemas/graph.yaml#/properties/port
description: HDMI output port
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Parallel audio input port
+
required:
- port@0
- port@1
@@ -98,5 +102,13 @@ examples:
remote-endpoint = <&hdmi0_con>;
};
};
+
+ port@2 {
+ reg = <2>;
+
+ endpoint {
+ remote-endpoint = <&pai_to_hdmi_tx>;
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml
new file mode 100644
index 000000000000..4f99682a308d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/imx/fsl,imx8mp-hdmi-pai.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8MP HDMI Parallel Audio Interface
+
+maintainers:
+ - Shengjiu Wang <shengjiu.wang@nxp.com>
+
+description:
+ The HDMI TX Parallel Audio Interface (HTX_PAI) is a bridge between the
+ Audio Subsystem and the HDMI TX Controller.
+
+properties:
+ compatible:
+ const: fsl,imx8mp-hdmi-pai
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: apb
+
+ power-domains:
+ maxItems: 1
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Output to the HDMI TX controller.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mp-clock.h>
+ #include <dt-bindings/power/imx8mp-power.h>
+
+ audio-bridge@32fc4800 {
+ compatible = "fsl,imx8mp-hdmi-pai";
+ reg = <0x32fc4800 0x800>;
+ interrupt-parent = <&irqsteer_hdmi>;
+ interrupts = <14>;
+ clocks = <&clk IMX8MP_CLK_HDMI_APB>;
+ clock-names = "apb";
+ power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_PAI>;
+
+ port {
+ pai_to_hdmi_tx: endpoint {
+ remote-endpoint = <&hdmi_tx_from_pai>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
index 434cc6af9c95..34a612705e8c 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
@@ -20,6 +20,7 @@ properties:
- bananapi,lhr050h41
- bestar,bsd1218-a101kl68
- feixin,k101-im2byl02
+ - raspberrypi,dsi-5inch
- raspberrypi,dsi-7inch
- startek,kd050hdfia020
- tdo,tl050hdv35
@@ -30,6 +31,7 @@ properties:
maxItems: 1
backlight: true
+ port: true
power-supply: true
reset-gpios: true
rotation: true
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 2017428d8828..35ba99b76119 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -184,6 +184,8 @@ properties:
- innolux,n156bge-l21
# Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
- innolux,zj070na-01p
+ # JuTouch Technology Co., Ltd. 10" JT101TM023 WXGA (1280 x 800) LVDS panel
+ - jutouch,jt101tm023
# Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
- koe,tx14d24vm1bpa
# Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml
new file mode 100644
index 000000000000..08a35ebbbb3c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,lq079l1sx01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sharp Microelectronics 7.9" WQXGA TFT LCD panel
+
+maintainers:
+ - Svyatoslav Ryhel <clamor95@gmail.com>
+
+description: >
+ This panel requires a dual-channel DSI host to operate. It supports only
+ video mode and only left-right split mode, where each channel drives the
+ left or right half of the screen.
+
+ Each of the DSI channels controls a separate DSI peripheral.
+ The peripheral driven by the first link (DSI-LINK1), the left one, is
+ considered the primary peripheral and controls the device.
+
+allOf:
+ - $ref: panel-common-dual.yaml#
+
+properties:
+ compatible:
+ const: sharp,lq079l1sx01
+
+ reg:
+ maxItems: 1
+
+ avdd-supply:
+ description: regulator that supplies the analog voltage
+
+ vddio-supply:
+ description: regulator that supplies the I/O voltage
+
+ vsp-supply:
+ description: positive boost supply regulator
+
+ vsn-supply:
+ description: negative boost supply regulator
+
+ reset-gpios:
+ maxItems: 1
+
+ backlight: true
+ ports: true
+
+required:
+ - compatible
+ - reg
+ - avdd-supply
+ - vddio-supply
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "sharp,lq079l1sx01";
+ reg = <0>;
+
+ reset-gpios = <&gpio 59 GPIO_ACTIVE_LOW>;
+
+ avdd-supply = <&avdd_lcd>;
+ vddio-supply = <&vdd_lcd_io>;
+ vsp-supply = <&vsp_5v5_lcd>;
+ vsn-supply = <&vsn_5v5_lcd>;
+
+ backlight = <&backlight>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ panel_in0: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ panel_in1: endpoint {
+ remote-endpoint = <&dsi1_out>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml
index c59df3c1a3f7..632b48bfabb9 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml
@@ -17,6 +17,7 @@ properties:
- rockchip,px30-mipi-dsi
- rockchip,rk3128-mipi-dsi
- rockchip,rk3288-mipi-dsi
+ - rockchip,rk3368-mipi-dsi
- rockchip,rk3399-mipi-dsi
- rockchip,rk3568-mipi-dsi
- rockchip,rv1126-mipi-dsi
@@ -73,6 +74,7 @@ allOf:
enum:
- rockchip,px30-mipi-dsi
- rockchip,rk3128-mipi-dsi
+ - rockchip,rk3368-mipi-dsi
- rockchip,rk3568-mipi-dsi
- rockchip,rv1126-mipi-dsi
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
index a5b4e0021758..613040fdb444 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
@@ -18,6 +18,7 @@ properties:
oneOf:
- items:
- enum:
+ - mediatek,mt8196-mali
- rockchip,rk3588-mali
- const: arm,mali-valhall-csf # Mali Valhall GPU model/revision is fully discoverable
@@ -91,7 +92,6 @@ required:
- interrupts
- interrupt-names
- clocks
- - mali-supply
additionalProperties: false
@@ -108,6 +108,8 @@ allOf:
power-domains:
maxItems: 1
power-domain-names: false
+ required:
+ - mali-supply
examples:
- |
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index f1d1882009ba..3451c9ac0add 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -835,6 +835,8 @@ patternProperties:
description: JOZ BV
"^jty,.*":
description: JTY
+ "^jutouch,.*":
+ description: JuTouch Technology Co., Ltd.
"^kam,.*":
description: Kamstrup A/S
"^karo,.*":
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 5139705089f2..781129f78b06 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -92,6 +92,18 @@ GEM Atomic Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
:export:
+VBLANK Helper Reference
+-----------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/drm_vblank_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c
+ :export:
+
Simple KMS Helper Reference
===========================
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index b5f58b4274b1..9013ced318cb 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -623,6 +623,43 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>, Simona Vetter
Level: Advanced
+Implement a new DUMB_CREATE2 ioctl
+----------------------------------
+
+The current DUMB_CREATE ioctl is not well defined. Instead of a pixel and
+framebuffer format, it only accepts a color mode of vague semantics. Assuming
+a linear framebuffer, the color mode gives an idea of the supported pixel
+format. But userspace effectively has to guess the correct values. It really
+only works reliably with framebuffers in XRGB8888. Userspace has begun to
+work around these limitations by computing an arbitrary format's buffer size
+in terms of XRGB8888 pixels.
+
+One possible solution is a new ioctl DUMB_CREATE2. It should accept a DRM
+format and a format modifier to resolve the color mode's ambiguity. As
+framebuffers can be multi-planar, the new ioctl has to return the buffer size,
+pitch and GEM handle for each individual color plane.
+
+In the first step, the new ioctl can be limited to the current features of
+the existing DUMB_CREATE. Individual drivers can then be extended to support
+multi-planar formats. Rockchip might require this and would be a good candidate.
+
+It might also be helpful to userspace to query information about the size of
+a potential buffer, if allocated. Userspace would supply geometry and format;
+the kernel would return minimal allocation sizes and scanline pitch. There is
+interest in allocating that memory from another device and providing it to the
+DRM driver (say via dma-buf).
+
+Another requested feature is the ability to allocate a buffer by size, without
+format. Accelerators use this for their buffer allocation and it could likely be
+generalized.
+
+In addition to the kernel implementation, there must be user-space support
+for the new ioctl. There's code in Mesa that might be able to use the new
+call.
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>
+
+Level: Advanced
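To make the shape of the proposed interface easier to picture, here is a minimal sketch of what a DUMB_CREATE2 argument structure could look like. The structure name, the fixed four-plane limit and the exact field layout are illustrative assumptions made for this note; no such uapi exists in the kernel today.

/*
 * Hypothetical sketch only -- not an existing kernel uapi. It illustrates
 * the inputs (geometry, DRM format, modifier) and the per-plane outputs
 * (GEM handle, pitch, size) that the TODO entry above describes.
 */
#include <linux/types.h>

struct drm_mode_create_dumb2 {
	/* in: framebuffer geometry and layout */
	__u32 width;
	__u32 height;
	__u32 format;		/* DRM_FORMAT_* fourcc code */
	__u32 flags;
	__u64 modifier;		/* DRM_FORMAT_MOD_* layout modifier */
	/* out: one entry per color plane of the chosen format */
	__u32 handles[4];	/* GEM handle backing each plane */
	__u32 pitches[4];	/* scanline pitch of each plane, in bytes */
	__u64 sizes[4];		/* allocation size of each plane, in bytes */
};

A query-only mode, as suggested above, could reuse the same structure: userspace fills in the geometry and format, the kernel returns pitches and sizes without creating GEM handles.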
Better Testing
==============
diff --git a/Documentation/gpu/xe/index.rst b/Documentation/gpu/xe/index.rst
index 88b22fad880e..bc432c95d1a3 100644
--- a/Documentation/gpu/xe/index.rst
+++ b/Documentation/gpu/xe/index.rst
@@ -14,6 +14,7 @@ DG2, etc is provided to prototype the driver.
xe_mm
xe_map
xe_migrate
+ xe_exec_queue
xe_cs
xe_pm
xe_gt_freq
diff --git a/Documentation/gpu/xe/xe_exec_queue.rst b/Documentation/gpu/xe/xe_exec_queue.rst
new file mode 100644
index 000000000000..6076569e311c
--- /dev/null
+++ b/Documentation/gpu/xe/xe_exec_queue.rst
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+===============
+Execution Queue
+===============
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue.c
+ :doc: Execution Queue
+
+Internal API
+============
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue_types.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue.c
+ :internal:
diff --git a/Documentation/userspace-api/dma-buf-heaps.rst b/Documentation/userspace-api/dma-buf-heaps.rst
index 1dfe5e7acd5a..05445c83b79a 100644
--- a/Documentation/userspace-api/dma-buf-heaps.rst
+++ b/Documentation/userspace-api/dma-buf-heaps.rst
@@ -16,13 +16,52 @@ following heaps:
- The ``system`` heap allocates virtually contiguous, cacheable, buffers.
- - The ``cma`` heap allocates physically contiguous, cacheable,
- buffers. Only present if a CMA region is present. Such a region is
- usually created either through the kernel commandline through the
- ``cma`` parameter, a memory region Device-Tree node with the
- ``linux,cma-default`` property set, or through the ``CMA_SIZE_MBYTES`` or
- ``CMA_SIZE_PERCENTAGE`` Kconfig options. The heap's name in devtmpfs is
- ``default_cma_region``. For backwards compatibility, when the
- ``DMABUF_HEAPS_CMA_LEGACY`` Kconfig option is set, a duplicate node is
- created following legacy naming conventions; the legacy name might be
- ``reserved``, ``linux,cma``, or ``default-pool``.
+ - The ``default_cma_region`` heap allocates physically contiguous,
+ cacheable, buffers. Only present if a CMA region is present. Such a
+ region is usually created either through the kernel commandline
+ through the ``cma`` parameter, a memory region Device-Tree node with
+ the ``linux,cma-default`` property set, or through the
+ ``CMA_SIZE_MBYTES`` or ``CMA_SIZE_PERCENTAGE`` Kconfig options. Prior
+ to Linux 6.17, its name wasn't stable and could be called
+ ``reserved``, ``linux,cma``, or ``default-pool``, depending on the
+ platform.
+
+ - A heap will be created for each reusable region in the device tree
+ with the ``shared-dma-pool`` compatible, using the full device tree
+ node name as its name. The buffer semantics are identical to
+ ``default_cma_region``.
+
+Naming Convention
+=================
+
+``dma-buf`` heap names should meet a number of constraints:
+
+- The name must be stable, and must not change from one version to the other.
+ Userspace identifies heaps by their name, so if the names ever change, we
+ would be likely to introduce regressions.
+
+- The name must describe the memory region the heap will allocate from, and
+ must uniquely identify it in a given platform. Since userspace applications
+ use the heap name as the discriminant, they must be able to tell reliably
+ which heap they want to use if there are multiple heaps.
+
+- The name must not mention implementation details, such as the allocator. The
+ heap driver will change over time, and implementation details when it was
+ introduced might not be relevant in the future.
+
+- The name should describe properties of the buffers that would be allocated.
+ Doing so will make heap identification easier for userspace. Such properties
+ are:
+
+ - ``contiguous`` for physically contiguous buffers;
+
+ - ``protected`` for encrypted buffers not accessible to the OS;
+
+- The name may describe intended usage. Doing so will make heap identification
+ easier for userspace applications and users.
+
+For example, assuming a platform with a reserved memory region located
+at the RAM address 0x42000000, intended to allocate video framebuffers,
+physically contiguous, and backed by the CMA kernel allocator, good
+names would be ``memory@42000000-contiguous`` or ``video@42000000``, but
+``cma-video`` wouldn't.
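Because userspace selects a heap purely by the name discussed above, a short allocation sketch shows where that name ends up. The ioctl and structure below come from the existing <linux/dma-heap.h> uapi; the heap name passed to the helper is a platform-specific assumption, not something the document mandates.

/* Minimal sketch: allocate a dma-buf from a named heap in /dev/dma_heap. */
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

static int alloc_from_heap(const char *heap_name, size_t len)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	char path[128];
	int heap_fd, ret;

	/* The heap name is the only handle userspace has, hence the
	 * stability requirements listed above. */
	snprintf(path, sizeof(path), "/dev/dma_heap/%s", heap_name);
	heap_fd = open(path, O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (ret < 0)
		return -1;

	return data.fd;	/* dma-buf file descriptor owned by the caller */
}

A caller on the platform described above might pass "default_cma_region" or "video@42000000" as heap_name, depending on which region it needs.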
diff --git a/MAINTAINERS b/MAINTAINERS
index 545a4776795e..096fcca26dc7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2092,7 +2092,8 @@ F: drivers/gpu/drm/arm/display/komeda/
ARM MALI PANFROST DRM DRIVER
M: Boris Brezillon <boris.brezillon@collabora.com>
M: Rob Herring <robh@kernel.org>
-R: Steven Price <steven.price@arm.com>
+M: Steven Price <steven.price@arm.com>
+M: Adrián Larumbe <adrian.larumbe@collabora.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
@@ -7309,6 +7310,7 @@ F: Documentation/userspace-api/dma-buf-alloc-exchange.rst
F: drivers/dma-buf/
F: include/linux/*fence.h
F: include/linux/dma-buf.h
+F: include/linux/dma-buf/
F: include/linux/dma-resv.h
K: \bdma_(?:buf|fence|resv)\b
@@ -7637,7 +7639,6 @@ F: drivers/accel/
F: include/drm/drm_accel.h
DRM DRIVER FOR ALLWINNER DE2 AND DE3 ENGINE
-M: Maxime Ripard <mripard@kernel.org>
M: Chen-Yu Tsai <wens@csie.org>
R: Jernej Skrabec <jernej.skrabec@gmail.com>
L: dri-devel@lists.freedesktop.org
@@ -7747,7 +7748,8 @@ F: Documentation/devicetree/bindings/display/panel/panel-edp.yaml
F: drivers/gpu/drm/panel/panel-edp.c
DRM DRIVER FOR GENERIC USB DISPLAY
-S: Orphan
+M: Ruben Wauters <rubenru09@aol.com>
+S: Maintained
W: https://github.com/notro/gud/wiki
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/gud/
@@ -7888,7 +7890,7 @@ DRM DRIVER for Qualcomm display hardware
M: Rob Clark <robin.clark@oss.qualcomm.com>
M: Dmitry Baryshkov <lumag@kernel.org>
R: Abhinav Kumar <abhinav.kumar@linux.dev>
-R: Jessica Zhang <jessica.zhang@oss.qualcomm.com>
+R: Jessica Zhang <jesszhan0024@gmail.com>
R: Sean Paul <sean@poorly.run>
R: Marijn Suijten <marijn.suijten@somainline.org>
L: linux-arm-msm@vger.kernel.org
@@ -8251,7 +8253,6 @@ F: drivers/gpu/nova-core/
F: rust/kernel/drm/
DRM DRIVERS FOR ALLWINNER A10
-M: Maxime Ripard <mripard@kernel.org>
M: Chen-Yu Tsai <wens@csie.org>
L: dri-devel@lists.freedesktop.org
S: Supported
@@ -8601,7 +8602,7 @@ F: drivers/gpu/drm/clients/drm_log.c
DRM PANEL DRIVERS
M: Neil Armstrong <neil.armstrong@linaro.org>
-R: Jessica Zhang <jessica.zhang@oss.qualcomm.com>
+R: Jessica Zhang <jesszhan0024@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
index 6797dac65efa..6344aaf523fa 100644
--- a/drivers/accel/amdxdna/Makefile
+++ b/drivers/accel/amdxdna/Makefile
@@ -14,6 +14,7 @@ amdxdna-y := \
amdxdna_mailbox.o \
amdxdna_mailbox_helper.o \
amdxdna_pci_drv.o \
+ amdxdna_pm.o \
amdxdna_sysfs.o \
amdxdna_ubuf.o \
npu1_regs.o \
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
index ad8ac6e315b6..0e4bbebeaedf 100644
--- a/drivers/accel/amdxdna/TODO
+++ b/drivers/accel/amdxdna/TODO
@@ -1,2 +1 @@
- Add debugfs support
-- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index e9f9b1fa5dc1..63450b7773ac 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -21,6 +21,7 @@
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
static bool force_cmdlist;
module_param(force_cmdlist, bool, 0600);
@@ -88,7 +89,7 @@ static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hw
goto out;
}
- ret = aie2_config_cu(hwctx);
+ ret = aie2_config_cu(hwctx, NULL);
if (ret) {
XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
goto out;
@@ -167,14 +168,11 @@ static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
int aie2_hwctx_resume(struct amdxdna_client *client)
{
- struct amdxdna_dev *xdna = client->xdna;
-
/*
* The resume path cannot guarantee that mailbox channel can be
* regenerated. If this happen, when submit message to this
* mailbox channel, error will return.
*/
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
}
@@ -184,6 +182,8 @@ aie2_sched_notify(struct amdxdna_sched_job *job)
struct dma_fence *fence = job->fence;
trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
+
+ amdxdna_pm_suspend_put(job->hwctx->client->xdna);
job->hwctx->priv->completed++;
dma_fence_signal(fence);
@@ -226,11 +226,10 @@ out:
}
static int
-aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
+aie2_sched_drvcmd_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
int ret = 0;
- u32 status;
if (unlikely(!data))
goto out;
@@ -240,8 +239,7 @@ aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
goto out;
}
- status = readl(data);
- XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+ job->drv_cmd->result = readl(data);
out:
aie2_sched_notify(job);
@@ -314,8 +312,18 @@ aie2_sched_job_run(struct drm_sched_job *sched_job)
kref_get(&job->refcnt);
fence = dma_fence_get(job->fence);
- if (unlikely(!cmd_abo)) {
- ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler);
+ if (job->drv_cmd) {
+ switch (job->drv_cmd->opcode) {
+ case SYNC_DEBUG_BO:
+ ret = aie2_sync_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
+ break;
+ case ATTACH_DEBUG_BO:
+ ret = aie2_config_debug_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
goto out;
}
@@ -531,7 +539,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
.num_rqs = DRM_SCHED_PRIORITY_COUNT,
.credit_limit = HWCTX_MAX_CMDS,
.timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
- .name = hwctx->name,
+ .name = "amdxdna_js",
.dev = xdna->ddev.dev,
};
struct drm_gpu_scheduler *sched;
@@ -610,10 +618,14 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
goto free_entity;
}
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto free_col_list;
+
ret = aie2_alloc_resource(hwctx);
if (ret) {
XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
- goto free_col_list;
+ goto suspend_put;
}
ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
@@ -628,6 +640,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
goto release_resource;
}
+ amdxdna_pm_suspend_put(xdna);
hwctx->status = HWCTX_STAT_INIT;
ndev = xdna->dev_handle;
@@ -640,6 +653,8 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
release_resource:
aie2_release_resource(hwctx);
+suspend_put:
+ amdxdna_pm_suspend_put(xdna);
free_col_list:
kfree(hwctx->col_list);
free_entity:
@@ -697,6 +712,14 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
kfree(hwctx->cus);
}
+static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_hwctx *hwctx = handle;
+
+ amdxdna_pm_suspend_put(hwctx->client->xdna);
+ return 0;
+}
+
static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
{
struct amdxdna_hwctx_param_config_cu *config = buf;
@@ -728,10 +751,14 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
if (!hwctx->cus)
return -ENOMEM;
- ret = aie2_config_cu(hwctx);
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto free_cus;
+
+ ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler);
if (ret) {
XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
- goto free_cus;
+ goto pm_suspend_put;
}
wmb(); /* To avoid locking in command submit when check status */
@@ -739,12 +766,82 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
return 0;
+pm_suspend_put:
+ amdxdna_pm_suspend_put(xdna);
free_cus:
kfree(hwctx->cus);
hwctx->cus = NULL;
return ret;
}
+static void aie2_cmd_wait(struct amdxdna_hwctx *hwctx, u64 seq)
+{
+ struct dma_fence *out_fence = aie2_cmd_get_out_fence(hwctx, seq);
+
+ if (!out_fence) {
+ XDNA_ERR(hwctx->client->xdna, "Failed to get fence");
+ return;
+ }
+
+ dma_fence_wait_timeout(out_fence, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(out_fence);
+}
+
+static int aie2_hwctx_cfg_debug_bo(struct amdxdna_hwctx *hwctx, u32 bo_hdl,
+ bool attach)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drv_cmd cmd = { 0 };
+ struct amdxdna_gem_obj *abo;
+ u64 seq;
+ int ret;
+
+ abo = amdxdna_gem_get_obj(client, bo_hdl, AMDXDNA_BO_DEV);
+ if (!abo) {
+ XDNA_ERR(xdna, "Get bo %d failed", bo_hdl);
+ return -EINVAL;
+ }
+
+ if (attach) {
+ if (abo->assigned_hwctx != AMDXDNA_INVALID_CTX_HANDLE) {
+ ret = -EBUSY;
+ goto put_obj;
+ }
+ cmd.opcode = ATTACH_DEBUG_BO;
+ } else {
+ if (abo->assigned_hwctx != hwctx->id) {
+ ret = -EINVAL;
+ goto put_obj;
+ }
+ cmd.opcode = DETACH_DEBUG_BO;
+ }
+
+ ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
+ &bo_hdl, 1, hwctx->id, &seq);
+ if (ret) {
+ XDNA_ERR(xdna, "Submit command failed");
+ goto put_obj;
+ }
+
+ aie2_cmd_wait(hwctx, seq);
+ if (cmd.result) {
+ XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
+ goto put_obj;
+ }
+
+ if (attach)
+ abo->assigned_hwctx = hwctx->id;
+ else
+ abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
+
+ XDNA_DBG(xdna, "Config debug BO %d to %s", bo_hdl, hwctx->name);
+
+put_obj:
+ amdxdna_gem_put_obj(abo);
+ return ret;
+}
+
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
@@ -754,14 +851,40 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu
case DRM_AMDXDNA_HWCTX_CONFIG_CU:
return aie2_hwctx_cu_config(hwctx, buf, size);
case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, true);
case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
- return -EOPNOTSUPP;
+ return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, false);
default:
XDNA_DBG(xdna, "Not supported type %d", type);
return -EOPNOTSUPP;
}
}
+int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drv_cmd cmd = { 0 };
+ u64 seq;
+ int ret;
+
+ cmd.opcode = SYNC_DEBUG_BO;
+ ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
+ &debug_bo_hdl, 1, hwctx->id, &seq);
+ if (ret) {
+ XDNA_ERR(xdna, "Submit command failed");
+ return ret;
+ }
+
+ aie2_cmd_wait(hwctx, seq);
+ if (cmd.result) {
+ XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
+ return ret;
+ }
+
+ return 0;
+}
+
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
@@ -862,11 +985,15 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
goto free_chain;
}
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto cleanup_job;
+
retry:
ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
if (ret) {
XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
- goto cleanup_job;
+ goto suspend_put;
}
for (i = 0; i < job->bo_cnt; i++) {
@@ -874,7 +1001,7 @@ retry:
if (ret) {
XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
- goto cleanup_job;
+ goto suspend_put;
}
}
@@ -889,12 +1016,12 @@ retry:
msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
} else if (time_after(jiffies, timeout)) {
ret = -ETIME;
- goto cleanup_job;
+ goto suspend_put;
}
ret = aie2_populate_range(abo);
if (ret)
- goto cleanup_job;
+ goto suspend_put;
goto retry;
}
}
@@ -920,6 +1047,8 @@ retry:
return 0;
+suspend_put:
+ amdxdna_pm_suspend_put(xdna);
cleanup_job:
drm_sched_job_cleanup(&job->base);
free_chain:
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
index 5ee905632a39..d452008ec4f4 100644
--- a/drivers/accel/amdxdna/aie2_error.c
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -13,6 +13,7 @@
#include "aie2_msg_priv.h"
#include "aie2_pci.h"
+#include "amdxdna_error.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
@@ -46,6 +47,7 @@ enum aie_module_type {
AIE_MEM_MOD = 0,
AIE_CORE_MOD,
AIE_PL_MOD,
+ AIE_UNKNOWN_MOD,
};
enum aie_error_category {
@@ -143,6 +145,31 @@ static const struct aie_event_category aie_ml_shim_tile_event_cat[] = {
EVENT_CATEGORY(74U, AIE_ERROR_LOCK),
};
+static const enum amdxdna_error_num aie_cat_err_num_map[] = {
+ [AIE_ERROR_SATURATION] = AMDXDNA_ERROR_NUM_AIE_SATURATION,
+ [AIE_ERROR_FP] = AMDXDNA_ERROR_NUM_AIE_FP,
+ [AIE_ERROR_STREAM] = AMDXDNA_ERROR_NUM_AIE_STREAM,
+ [AIE_ERROR_ACCESS] = AMDXDNA_ERROR_NUM_AIE_ACCESS,
+ [AIE_ERROR_BUS] = AMDXDNA_ERROR_NUM_AIE_BUS,
+ [AIE_ERROR_INSTRUCTION] = AMDXDNA_ERROR_NUM_AIE_INSTRUCTION,
+ [AIE_ERROR_ECC] = AMDXDNA_ERROR_NUM_AIE_ECC,
+ [AIE_ERROR_LOCK] = AMDXDNA_ERROR_NUM_AIE_LOCK,
+ [AIE_ERROR_DMA] = AMDXDNA_ERROR_NUM_AIE_DMA,
+ [AIE_ERROR_MEM_PARITY] = AMDXDNA_ERROR_NUM_AIE_MEM_PARITY,
+ [AIE_ERROR_UNKNOWN] = AMDXDNA_ERROR_NUM_UNKNOWN,
+};
+
+static_assert(ARRAY_SIZE(aie_cat_err_num_map) == AIE_ERROR_UNKNOWN + 1);
+
+static const enum amdxdna_error_module aie_err_mod_map[] = {
+ [AIE_MEM_MOD] = AMDXDNA_ERROR_MODULE_AIE_MEMORY,
+ [AIE_CORE_MOD] = AMDXDNA_ERROR_MODULE_AIE_CORE,
+ [AIE_PL_MOD] = AMDXDNA_ERROR_MODULE_AIE_PL,
+ [AIE_UNKNOWN_MOD] = AMDXDNA_ERROR_MODULE_UNKNOWN,
+};
+
+static_assert(ARRAY_SIZE(aie_err_mod_map) == AIE_UNKNOWN_MOD + 1);
+
static enum aie_error_category
aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
{
@@ -176,12 +203,40 @@ aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
if (event_id != lut[i].event_id)
continue;
+ if (lut[i].category > AIE_ERROR_UNKNOWN)
+ return AIE_ERROR_UNKNOWN;
+
return lut[i].category;
}
return AIE_ERROR_UNKNOWN;
}
+static void aie2_update_last_async_error(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
+{
+ struct aie_error *errs = err_info;
+ enum amdxdna_error_module err_mod;
+ enum aie_error_category aie_err;
+ enum amdxdna_error_num err_num;
+ struct aie_error *last_err;
+
+ last_err = &errs[num_err - 1];
+ if (last_err->mod_type >= AIE_UNKNOWN_MOD) {
+ err_num = aie_cat_err_num_map[AIE_ERROR_UNKNOWN];
+ err_mod = aie_err_mod_map[AIE_UNKNOWN_MOD];
+ } else {
+ aie_err = aie_get_error_category(last_err->row,
+ last_err->event_id,
+ last_err->mod_type);
+ err_num = aie_cat_err_num_map[aie_err];
+ err_mod = aie_err_mod_map[last_err->mod_type];
+ }
+
+ ndev->last_async_err.err_code = AMDXDNA_ERROR_ENCODE(err_num, err_mod);
+ ndev->last_async_err.ts_us = ktime_to_us(ktime_get_real());
+ ndev->last_async_err.ex_err_code = AMDXDNA_EXTRA_ERR_ENCODE(last_err->row, last_err->col);
+}
+
static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
{
struct aie_error *errs = err_info;
@@ -264,29 +319,14 @@ static void aie2_error_worker(struct work_struct *err_work)
}
mutex_lock(&xdna->dev_lock);
+ aie2_update_last_async_error(e->ndev, info->payload, info->err_cnt);
+
/* Re-sent this event to firmware */
if (aie2_error_event_send(e))
XDNA_WARN(xdna, "Unable to register async event");
mutex_unlock(&xdna->dev_lock);
}
-int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev)
-{
- struct amdxdna_dev *xdna = ndev->xdna;
- struct async_event *e;
- int i, ret;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- for (i = 0; i < ndev->async_events->event_cnt; i++) {
- e = &ndev->async_events->event[i];
- ret = aie2_error_event_send(e);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev)
{
struct amdxdna_dev *xdna = ndev->xdna;
@@ -341,6 +381,10 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
e->size = ASYNC_BUF_SIZE;
e->resp.status = MAX_AIE2_STATUS_CODE;
INIT_WORK(&e->work, aie2_error_worker);
+
+ ret = aie2_error_event_send(e);
+ if (ret)
+ goto free_wq;
}
ndev->async_events = events;
@@ -349,6 +393,8 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
events->event_cnt, events->size);
return 0;
+free_wq:
+ destroy_workqueue(events->wq);
free_buf:
dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
events->addr, DMA_FROM_DEVICE);
@@ -356,3 +402,18 @@ free_events:
kfree(events);
return ret;
}
+
+int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev, struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ args->num_element = 1;
+ args->element_size = sizeof(ndev->last_async_err);
+ if (copy_to_user(u64_to_user_ptr(args->buffer),
+ &ndev->last_async_err, args->element_size))
+ return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index 9caad083543d..0ec1dc6fe668 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -37,7 +37,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
if (!ndev->mgmt_chann)
return -ENODEV;
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ drm_WARN_ON(&xdna->ddev, xdna->rpm_on && !mutex_is_locked(&xdna->dev_lock));
ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
if (ret == -ETIME) {
xdna_mailbox_stop_channel(ndev->mgmt_chann);
@@ -377,15 +377,17 @@ int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr,
return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
}
-int aie2_config_cu(struct amdxdna_hwctx *hwctx)
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
struct amdxdna_dev *xdna = hwctx->client->xdna;
u32 shift = xdna->dev_info->dev_mem_buf_shift;
- DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU);
+ struct config_cu_req req = { 0 };
+ struct xdna_mailbox_msg msg;
struct drm_gem_object *gobj;
struct amdxdna_gem_obj *abo;
- int ret, i;
+ int i;
if (!chann)
return -ENODEV;
@@ -423,18 +425,12 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx)
}
req.num_cus = hwctx->cus->num_cus;
- ret = xdna_send_msg_wait(xdna, chann, &msg);
- if (ret == -ETIME)
- aie2_destroy_context(xdna->dev_handle, hwctx);
-
- if (resp.status == AIE2_STATUS_SUCCESS) {
- XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret);
- return 0;
- }
-
- XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d",
- msg.opcode, resp.status, ret);
- return ret;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.handle = hwctx;
+ msg.opcode = MSG_OP_CONFIG_CU;
+ msg.notify_cb = notify_cb;
+ return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
}
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
@@ -753,7 +749,7 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int ret = 0;
req.src_addr = 0;
- req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr;
+ req.dst_addr = amdxdna_dev_bo_offset(abo);
req.size = abo->mem.size;
/* Device to Host */
@@ -777,3 +773,32 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
return 0;
}
+
+int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct config_debug_bo_req req;
+ struct xdna_mailbox_msg msg;
+
+ if (job->drv_cmd->opcode == ATTACH_DEBUG_BO)
+ req.config = DEBUG_BO_REGISTER;
+ else
+ req.config = DEBUG_BO_UNREGISTER;
+
+ req.offset = amdxdna_dev_bo_offset(abo);
+ req.size = abo->mem.size;
+
+ XDNA_DBG(xdna, "offset 0x%llx size 0x%llx config %d",
+ req.offset, req.size, req.config);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.opcode = MSG_OP_CONFIG_DEBUG_BO;
+
+ return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
index 6df9065b13f6..cb53132029eb 100644
--- a/drivers/accel/amdxdna/aie2_msg_priv.h
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -18,6 +18,7 @@ enum aie2_msg_opcode {
MSG_OP_CONFIG_CU = 0x11,
MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12,
MSG_OP_CHAIN_EXEC_DPU = 0x13,
+ MSG_OP_CONFIG_DEBUG_BO = 0x14,
MSG_OP_MAX_XRT_OPCODE,
MSG_OP_SUSPEND = 0x101,
MSG_OP_RESUME = 0x102,
@@ -365,4 +366,21 @@ struct sync_bo_req {
struct sync_bo_resp {
enum aie2_msg_status status;
} __packed;
+
+#define DEBUG_BO_UNREGISTER 0
+#define DEBUG_BO_REGISTER 1
+struct config_debug_bo_req {
+ __u64 offset;
+ __u64 size;
+ /*
+ * config operations.
+ * DEBUG_BO_REGISTER: Register debug buffer
+ * DEBUG_BO_UNREGISTER: Unregister debug buffer
+ */
+ __u32 config;
+} __packed;
+
+struct config_debug_bo_resp {
+ enum aie2_msg_status status;
+} __packed;
#endif /* _AIE2_MSG_PRIV_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index 87c425e3d2b9..f48045318dc0 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -25,6 +25,7 @@
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
static int aie2_max_col = XRS_MAX_COL;
module_param(aie2_max_col, uint, 0600);
@@ -223,15 +224,6 @@ static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
return ret;
}
- if (!ndev->async_events)
- return 0;
-
- ret = aie2_error_async_events_send(ndev);
- if (ret) {
- XDNA_ERR(ndev->xdna, "Send async events failed");
- return ret;
- }
-
return 0;
}
@@ -257,6 +249,8 @@ static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
return ret;
}
+ ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
+
return 0;
}
@@ -338,6 +332,7 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna)
ndev->mbox = NULL;
aie2_psp_stop(ndev->psp_hdl);
aie2_smu_fini(ndev);
+ aie2_error_async_events_free(ndev);
pci_disable_device(pdev);
ndev->dev_status = AIE2_DEV_INIT;
@@ -424,6 +419,18 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
goto destroy_mgmt_chann;
}
+ ret = aie2_mgmt_fw_query(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_error_async_events_alloc(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
ndev->dev_status = AIE2_DEV_START;
return 0;
@@ -459,7 +466,6 @@ static int aie2_hw_resume(struct amdxdna_dev *xdna)
struct amdxdna_client *client;
int ret;
- guard(mutex)(&xdna->dev_lock);
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "Start hardware failed, %d", ret);
@@ -565,13 +571,6 @@ static int aie2_init(struct amdxdna_dev *xdna)
goto release_fw;
}
- ret = aie2_mgmt_fw_query(ndev);
- if (ret) {
- XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
- goto stop_hw;
- }
- ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
-
xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
@@ -587,30 +586,10 @@ static int aie2_init(struct amdxdna_dev *xdna)
goto stop_hw;
}
- ret = aie2_error_async_events_alloc(ndev);
- if (ret) {
- XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
- goto stop_hw;
- }
-
- ret = aie2_error_async_events_send(ndev);
- if (ret) {
- XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
- goto async_event_free;
- }
-
- /* Issue a command to make sure firmware handled async events */
- ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
- if (ret) {
- XDNA_ERR(xdna, "Re-query firmware version failed");
- goto async_event_free;
- }
-
release_firmware(fw);
+ amdxdna_pm_init(xdna);
return 0;
-async_event_free:
- aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
release_fw:
@@ -621,10 +600,8 @@ release_fw:
static void aie2_fini(struct amdxdna_dev *xdna)
{
- struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
-
+ amdxdna_pm_fini(xdna);
aie2_hw_stop(xdna);
- aie2_error_async_events_free(ndev);
}
static int aie2_get_aie_status(struct amdxdna_client *client,
@@ -856,6 +833,10 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
switch (args->param) {
case DRM_AMDXDNA_QUERY_AIE_STATUS:
ret = aie2_get_aie_status(client, args);
@@ -882,8 +863,11 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
ret = -EOPNOTSUPP;
}
+
+ amdxdna_pm_suspend_put(xdna);
XDNA_DBG(xdna, "Got param %d", args->param);
+dev_exit:
drm_dev_exit(idx);
return ret;
}
@@ -898,6 +882,12 @@ static int aie2_query_ctx_status_array(struct amdxdna_client *client,
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
+ XDNA_DBG(xdna, "Invalid element size %d or number of element %d",
+ args->element_size, args->num_element);
+ return -EINVAL;
+ }
+
array_args.element_size = min(args->element_size,
sizeof(struct amdxdna_drm_hwctx_entry));
array_args.buffer = args->buffer;
@@ -926,16 +916,26 @@ static int aie2_get_array(struct amdxdna_client *client,
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
switch (args->param) {
case DRM_AMDXDNA_HW_CONTEXT_ALL:
ret = aie2_query_ctx_status_array(client, args);
break;
+ case DRM_AMDXDNA_HW_LAST_ASYNC_ERR:
+ ret = aie2_get_array_async_error(xdna->dev_handle, args);
+ break;
default:
XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
ret = -EOPNOTSUPP;
}
+
+ amdxdna_pm_suspend_put(xdna);
XDNA_DBG(xdna, "Got param %d", args->param);
+dev_exit:
drm_dev_exit(idx);
return ret;
}
@@ -974,6 +974,10 @@ static int aie2_set_state(struct amdxdna_client *client,
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
switch (args->param) {
case DRM_AMDXDNA_SET_POWER_MODE:
ret = aie2_set_power_mode(client, args);
@@ -984,6 +988,8 @@ static int aie2_set_state(struct amdxdna_client *client,
break;
}
+ amdxdna_pm_suspend_put(xdna);
+dev_exit:
drm_dev_exit(idx);
return ret;
}
@@ -998,6 +1004,7 @@ const struct amdxdna_dev_ops aie2_ops = {
.hwctx_init = aie2_hwctx_init,
.hwctx_fini = aie2_hwctx_fini,
.hwctx_config = aie2_hwctx_config,
+ .hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo,
.cmd_submit = aie2_cmd_submit,
.hmm_invalidate = aie2_hmm_invalidate,
.get_array = aie2_get_array,
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
index 91a8e948f82a..243ac21d50c1 100644
--- a/drivers/accel/amdxdna/aie2_pci.h
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -190,6 +190,8 @@ struct amdxdna_dev_hdl {
enum aie2_dev_status dev_status;
u32 hwctx_num;
+
+ struct amdxdna_async_error last_async_err;
};
#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
@@ -253,8 +255,9 @@ void aie2_psp_stop(struct psp_device *psp);
/* aie2_error.c */
int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev);
void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev);
-int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev);
int aie2_error_async_msg_thread(void *data);
+int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_drm_get_array *args);
/* aie2_message.c */
int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
@@ -272,7 +275,8 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
void *handle, int (*cb)(void*, void __iomem *, size_t));
-int aie2_config_cu(struct amdxdna_hwctx *hwctx);
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+ int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
@@ -283,11 +287,14 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
/* aie2_hwctx.c */
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl);
void aie2_hwctx_suspend(struct amdxdna_client *client);
int aie2_hwctx_resume(struct amdxdna_client *client);
int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
index d303701b0ded..7f292a615ed8 100644
--- a/drivers/accel/amdxdna/aie2_smu.c
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -11,6 +11,7 @@
#include "aie2_pci.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
#define SMU_RESULT_OK 1
@@ -59,12 +60,16 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
u32 freq;
int ret;
+ ret = amdxdna_pm_resume_get(ndev->xdna);
+ if (ret)
+ return ret;
+
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
if (ret) {
XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
- return ret;
+ goto suspend_put;
}
ndev->npuclk_freq = freq;
@@ -73,8 +78,10 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
if (ret) {
XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
- return ret;
+ goto suspend_put;
}
+
+ amdxdna_pm_suspend_put(ndev->xdna);
ndev->hclk_freq = freq;
ndev->dpm_level = dpm_level;
@@ -82,26 +89,35 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
ndev->npuclk_freq, ndev->hclk_freq);
return 0;
+
+suspend_put:
+ amdxdna_pm_suspend_put(ndev->xdna);
+ return ret;
}
int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
{
int ret;
+ ret = amdxdna_pm_resume_get(ndev->xdna);
+ if (ret)
+ return ret;
+
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ",
dpm_level, ret);
- return ret;
+ goto suspend_put;
}
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
dpm_level, ret);
- return ret;
+ goto suspend_put;
}
+ amdxdna_pm_suspend_put(ndev->xdna);
ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
ndev->dpm_level = dpm_level;
@@ -110,6 +126,10 @@ int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
ndev->npuclk_freq, ndev->hclk_freq);
return 0;
+
+suspend_put:
+ amdxdna_pm_suspend_put(ndev->xdna);
+ return ret;
}
int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index 4bfe4ef20550..d18182c59668 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -161,19 +161,14 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
if (args->ext || args->ext_flags)
return -EINVAL;
- if (!drm_dev_enter(dev, &idx))
- return -ENODEV;
-
hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
- if (!hwctx) {
- ret = -ENOMEM;
- goto exit;
- }
+ if (!hwctx)
+ return -ENOMEM;
if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
XDNA_ERR(xdna, "Access QoS info failed");
- ret = -EFAULT;
- goto free_hwctx;
+ kfree(hwctx);
+ return -EFAULT;
}
hwctx->client = client;
@@ -181,30 +176,36 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
hwctx->num_tiles = args->num_tiles;
hwctx->mem_size = args->mem_size;
hwctx->max_opc = args->max_opc;
- ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
- XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
- &client->next_hwctxid, GFP_KERNEL);
- if (ret < 0) {
- XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
+
+ guard(mutex)(&xdna->dev_lock);
+
+ if (!drm_dev_enter(dev, &idx)) {
+ ret = -ENODEV;
goto free_hwctx;
}
- hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
+ ret = xdna->dev_info->ops->hwctx_init(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ goto dev_exit;
+ }
+
+ hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id);
if (!hwctx->name) {
ret = -ENOMEM;
- goto rm_id;
+ goto fini_hwctx;
}
- mutex_lock(&xdna->dev_lock);
- ret = xdna->dev_info->ops->hwctx_init(hwctx);
- if (ret) {
- mutex_unlock(&xdna->dev_lock);
- XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
+ XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
+ &client->next_hwctxid, GFP_KERNEL);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
goto free_name;
}
+
args->handle = hwctx->id;
args->syncobj_handle = hwctx->syncobj_hdl;
- mutex_unlock(&xdna->dev_lock);
atomic64_set(&hwctx->job_submit_cnt, 0);
atomic64_set(&hwctx->job_free_cnt, 0);
@@ -214,12 +215,12 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
free_name:
kfree(hwctx->name);
-rm_id:
- xa_erase(&client->hwctx_xa, hwctx->id);
+fini_hwctx:
+ xdna->dev_info->ops->hwctx_fini(hwctx);
+dev_exit:
+ drm_dev_exit(idx);
free_hwctx:
kfree(hwctx);
-exit:
- drm_dev_exit(idx);
return ret;
}
@@ -327,6 +328,38 @@ unlock_srcu:
return ret;
}
+int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret, idx;
+
+ if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
+ return -EOPNOTSUPP;
+
+ gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl);
+ if (!gobj)
+ return -EINVAL;
+
+ abo = to_xdna_obj(gobj);
+ guard(mutex)(&xdna->dev_lock);
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
+ if (!hwctx) {
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);
+
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
static void
amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
{
@@ -392,6 +425,7 @@ void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
}
int amdxdna_cmd_submit(struct amdxdna_client *client,
+ struct amdxdna_drv_cmd *drv_cmd,
u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
u32 hwctx_hdl, u64 *seq)
{
@@ -405,6 +439,8 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
if (!job)
return -ENOMEM;
+ job->drv_cmd = drv_cmd;
+
if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
if (!job->cmd_bo) {
@@ -412,8 +448,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
ret = -EINVAL;
goto free_job;
}
- } else {
- job->cmd_bo = NULL;
}
ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
@@ -431,11 +465,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
goto unlock_srcu;
}
- if (hwctx->status != HWCTX_STAT_READY) {
- XDNA_ERR(xdna, "HW Context is not ready");
- ret = -EINVAL;
- goto unlock_srcu;
- }
job->hwctx = hwctx;
job->mm = current->mm;
@@ -512,7 +541,7 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
}
}
- ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
+ ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls,
args->arg_count, args->hwctx, &args->seq);
if (ret)
XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
index 7cd7a55936f0..cbe60efbe60b 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.h
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -95,6 +95,17 @@ struct amdxdna_hwctx {
#define drm_job_to_xdna_job(j) \
container_of(j, struct amdxdna_sched_job, base)
+enum amdxdna_job_opcode {
+ SYNC_DEBUG_BO,
+ ATTACH_DEBUG_BO,
+ DETACH_DEBUG_BO,
+};
+
+struct amdxdna_drv_cmd {
+ enum amdxdna_job_opcode opcode;
+ u32 result;
+};
+
struct amdxdna_sched_job {
struct drm_sched_job base;
struct kref refcnt;
@@ -106,6 +117,7 @@ struct amdxdna_sched_job {
struct dma_fence *out_fence;
bool job_done;
u64 seq;
+ struct amdxdna_drv_cmd *drv_cmd;
struct amdxdna_gem_obj *cmd_bo;
size_t bo_cnt;
struct drm_gem_object *bos[] __counted_by(bo_cnt);
@@ -143,9 +155,11 @@ void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
+int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl);
int amdxdna_cmd_submit(struct amdxdna_client *client,
- u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdls,
+ u32 *arg_bo_hdls, u32 arg_bo_cnt,
u32 hwctx_hdl, u64 *seq);
int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
diff --git a/drivers/accel/amdxdna/amdxdna_error.h b/drivers/accel/amdxdna/amdxdna_error.h
new file mode 100644
index 000000000000..c51de86ec12b
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_error.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_ERROR_H_
+#define _AMDXDNA_ERROR_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#define AMDXDNA_ERR_DRV_AIE 4
+#define AMDXDNA_ERR_SEV_CRITICAL 3
+#define AMDXDNA_ERR_CLASS_AIE 2
+
+#define AMDXDNA_ERR_NUM_MASK GENMASK_U64(15, 0)
+#define AMDXDNA_ERR_DRV_MASK GENMASK_U64(23, 16)
+#define AMDXDNA_ERR_SEV_MASK GENMASK_U64(31, 24)
+#define AMDXDNA_ERR_MOD_MASK GENMASK_U64(39, 32)
+#define AMDXDNA_ERR_CLASS_MASK GENMASK_U64(47, 40)
+
+enum amdxdna_error_num {
+ AMDXDNA_ERROR_NUM_AIE_SATURATION = 3,
+ AMDXDNA_ERROR_NUM_AIE_FP,
+ AMDXDNA_ERROR_NUM_AIE_STREAM,
+ AMDXDNA_ERROR_NUM_AIE_ACCESS,
+ AMDXDNA_ERROR_NUM_AIE_BUS,
+ AMDXDNA_ERROR_NUM_AIE_INSTRUCTION,
+ AMDXDNA_ERROR_NUM_AIE_ECC,
+ AMDXDNA_ERROR_NUM_AIE_LOCK,
+ AMDXDNA_ERROR_NUM_AIE_DMA,
+ AMDXDNA_ERROR_NUM_AIE_MEM_PARITY,
+ AMDXDNA_ERROR_NUM_UNKNOWN = 15,
+};
+
+enum amdxdna_error_module {
+ AMDXDNA_ERROR_MODULE_AIE_CORE = 3,
+ AMDXDNA_ERROR_MODULE_AIE_MEMORY,
+ AMDXDNA_ERROR_MODULE_AIE_SHIM,
+ AMDXDNA_ERROR_MODULE_AIE_NOC,
+ AMDXDNA_ERROR_MODULE_AIE_PL,
+ AMDXDNA_ERROR_MODULE_UNKNOWN = 8,
+};
+
+#define AMDXDNA_ERROR_ENCODE(err_num, err_mod) \
+ (FIELD_PREP(AMDXDNA_ERR_NUM_MASK, err_num) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_DRV_MASK, AMDXDNA_ERR_DRV_AIE) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_SEV_MASK, AMDXDNA_ERR_SEV_CRITICAL) | \
+ FIELD_PREP(AMDXDNA_ERR_MOD_MASK, err_mod) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_CLASS_MASK, AMDXDNA_ERR_CLASS_AIE))
+
+#define AMDXDNA_EXTRA_ERR_COL_MASK GENMASK_U64(7, 0)
+#define AMDXDNA_EXTRA_ERR_ROW_MASK GENMASK_U64(15, 8)
+
+#define AMDXDNA_EXTRA_ERR_ENCODE(row, col) \
+ (FIELD_PREP(AMDXDNA_EXTRA_ERR_COL_MASK, col) | \
+ FIELD_PREP(AMDXDNA_EXTRA_ERR_ROW_MASK, row))
+
+#endif /* _AMDXDNA_ERROR_H_ */
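A minimal sketch (not part of the patch) of how the new helpers compose the error code surfaced through DRM_AMDXDNA_HW_LAST_ASYNC_ERR; the tile coordinates below are made-up values for illustration only:

	/* AIE DMA error (11) reported by an AIE memory tile (4):
	 * class 0x02 | module 0x04 | severity 0x03 | driver 0x04 | num 0x000b
	 * => 0x02040304000b
	 */
	u64 err_code = AMDXDNA_ERROR_ENCODE(AMDXDNA_ERROR_NUM_AIE_DMA,
					    AMDXDNA_ERROR_MODULE_AIE_MEMORY);
	u64 err_ex   = AMDXDNA_EXTRA_ERR_ENCODE(2, 5);	/* row 2, col 5 -> 0x0205 */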
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index d407a36eb412..61e0136c21a8 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -392,35 +392,33 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
.vunmap = drm_gem_dmabuf_vunmap,
};
-static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
{
- struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
-
- iosys_map_clear(map);
-
- dma_resv_assert_held(obj->resv);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
+ int ret;
if (is_import_bo(abo))
- dma_buf_vmap(abo->dma_buf, map);
+ ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
else
- drm_gem_shmem_object_vmap(obj, map);
+ ret = drm_gem_vmap(to_gobj(abo), &map);
- if (!map->vaddr)
- return -ENOMEM;
-
- return 0;
+ *vaddr = map.vaddr;
+ return ret;
}
-static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo)
{
- struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+ struct iosys_map map;
+
+ if (!abo->mem.kva)
+ return;
- dma_resv_assert_held(obj->resv);
+ iosys_map_set_vaddr(&map, abo->mem.kva);
if (is_import_bo(abo))
- dma_buf_vunmap(abo->dma_buf, map);
+ dma_buf_vunmap_unlocked(abo->dma_buf, &map);
else
- drm_gem_shmem_object_vunmap(obj, map);
+ drm_gem_vunmap(to_gobj(abo), &map);
}
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
@@ -455,7 +453,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
@@ -468,7 +465,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
if (abo->type == AMDXDNA_BO_DEV_HEAP)
drm_mm_takedown(&abo->mm);
- drm_gem_vunmap(gobj, &map);
+ amdxdna_gem_obj_vunmap(abo);
mutex_destroy(&abo->lock);
if (is_import_bo(abo)) {
@@ -489,8 +486,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
- .vmap = amdxdna_gem_obj_vmap,
- .vunmap = amdxdna_gem_obj_vunmap,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
.mmap = amdxdna_gem_obj_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
.export = amdxdna_gem_prime_export,
@@ -663,7 +660,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_gem_obj *abo;
int ret;
@@ -692,12 +688,11 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
goto release_obj;
}
- abo->mem.kva = map.vaddr;
client->dev_heap = abo;
drm_gem_object_get(to_gobj(abo));
@@ -748,7 +743,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
struct drm_file *filp)
{
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_gem_obj *abo;
int ret;
@@ -770,12 +764,11 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
}
- abo->mem.kva = map.vaddr;
return abo;
@@ -969,6 +962,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
args->handle, args->offset, args->size);
+ if (args->direction == SYNC_DIRECT_FROM_DEVICE)
+ ret = amdxdna_hwctx_sync_debug_bo(abo->client, args->handle);
+
put_obj:
drm_gem_object_put(gobj);
return ret;
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
index ae29db94a9d3..f79fc7f3c93b 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.h
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -7,6 +7,7 @@
#define _AMDXDNA_GEM_H_
#include <linux/hmm.h>
+#include "amdxdna_pci_drv.h"
struct amdxdna_umap {
struct vm_area_struct *vma;
@@ -62,6 +63,11 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
drm_gem_object_put(to_gobj(abo));
}
+static inline u64 amdxdna_dev_bo_offset(struct amdxdna_gem_obj *abo)
+{
+ return abo->mem.dev_addr - abo->client->dev_heap->mem.dev_addr;
+}
+
void amdxdna_umap_put(struct amdxdna_umap *mapp);
struct drm_gem_object *
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
index da1ac89bb78f..24258dcc18eb 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -194,7 +194,8 @@ static void mailbox_release_msg(struct mailbox_channel *mb_chann,
{
MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
- mb_msg->notify_cb(mb_msg->handle, NULL, 0);
+ if (mb_msg->notify_cb)
+ mb_msg->notify_cb(mb_msg->handle, NULL, 0);
kfree(mb_msg);
}
@@ -248,7 +249,7 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
{
struct mailbox_msg *mb_msg;
int msg_id;
- int ret;
+ int ret = 0;
msg_id = header->id;
if (!mailbox_validate_msgid(msg_id)) {
@@ -265,9 +266,11 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
header->opcode, header->total_size, header->id);
- ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
- if (unlikely(ret))
- MB_ERR(mb_chann, "Message callback ret %d", ret);
+ if (mb_msg->notify_cb) {
+ ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
+ if (unlikely(ret))
+ MB_ERR(mb_chann, "Message callback ret %d", ret);
+ }
kfree(mb_msg);
return ret;
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index 569cd703729d..3599e713bfcb 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -13,13 +13,11 @@
#include <drm/gpu_scheduler.h>
#include <linux/iommu.h>
#include <linux/pci.h>
-#include <linux/pm_runtime.h>
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
-
-#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+#include "amdxdna_pm.h"
MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
@@ -29,9 +27,11 @@ MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
/*
* 0.0: Initial version
* 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
+ * 0.2: Support getting the last hardware error
+ * 0.3: Support firmware debug buffer
*/
#define AMDXDNA_DRIVER_MAJOR 0
-#define AMDXDNA_DRIVER_MINOR 1
+#define AMDXDNA_DRIVER_MINOR 3
/*
* Bind the driver base on (vendor_id, device_id) pair and later use the
@@ -61,17 +61,9 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
struct amdxdna_client *client;
int ret;
- ret = pm_runtime_resume_and_get(ddev->dev);
- if (ret) {
- XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret);
- return ret;
- }
-
client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client) {
- ret = -ENOMEM;
- goto put_rpm;
- }
+ if (!client)
+ return -ENOMEM;
client->pid = pid_nr(rcu_access_pointer(filp->pid));
client->xdna = xdna;
@@ -106,9 +98,6 @@ unbind_sva:
iommu_sva_unbind_device(client->sva);
failed:
kfree(client);
-put_rpm:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
@@ -130,8 +119,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
XDNA_DBG(xdna, "pid %d closed", client->pid);
kfree(client);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
}
static int amdxdna_flush(struct file *f, fl_owner_t id)
@@ -310,19 +297,12 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto failed_dev_fini;
}
- pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(dev);
- pm_runtime_allow(dev);
-
ret = drm_dev_register(&xdna->ddev, 0);
if (ret) {
XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
- pm_runtime_forbid(dev);
goto failed_sysfs_fini;
}
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
return 0;
failed_sysfs_fini:
@@ -339,14 +319,10 @@ destroy_notifier_wq:
static void amdxdna_remove(struct pci_dev *pdev)
{
struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
struct amdxdna_client *client;
destroy_workqueue(xdna->notifier_wq);
- pm_runtime_get_noresume(dev);
- pm_runtime_forbid(dev);
-
drm_dev_unplug(&xdna->ddev);
amdxdna_sysfs_fini(xdna);
@@ -365,29 +341,9 @@ static void amdxdna_remove(struct pci_dev *pdev)
mutex_unlock(&xdna->dev_lock);
}
-static int amdxdna_pmops_suspend(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
-
- if (!xdna->dev_info->ops->suspend)
- return -EOPNOTSUPP;
-
- return xdna->dev_info->ops->suspend(xdna);
-}
-
-static int amdxdna_pmops_resume(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
-
- if (!xdna->dev_info->ops->resume)
- return -EOPNOTSUPP;
-
- return xdna->dev_info->ops->resume(xdna);
-}
-
static const struct dev_pm_ops amdxdna_pm_ops = {
- SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
- RUNTIME_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume)
+ RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL)
};
static struct pci_driver amdxdna_pci_driver = {
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index 72d6696d49da..c99477f5e454 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -6,6 +6,7 @@
#ifndef _AMDXDNA_PCI_DRV_H_
#define _AMDXDNA_PCI_DRV_H_
+#include <drm/drm_print.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>
@@ -54,6 +55,7 @@ struct amdxdna_dev_ops {
int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+ int (*hwctx_sync_debug_bo)(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl);
void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
@@ -99,6 +101,7 @@ struct amdxdna_dev {
struct amdxdna_fw_ver fw_ver;
struct rw_semaphore notifier_lock; /* for mmu notifier*/
struct workqueue_struct *notifier_wq;
+ bool rpm_on;
};
/*
diff --git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c
new file mode 100644
index 000000000000..fa38e65d617c
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pm.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_drv.h>
+#include <linux/pm_runtime.h>
+
+#include "amdxdna_pm.h"
+
+#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+
+int amdxdna_pm_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+ bool rpm;
+
+ if (xdna->dev_info->ops->suspend) {
+ rpm = xdna->rpm_on;
+ xdna->rpm_on = false;
+ ret = xdna->dev_info->ops->suspend(xdna);
+ xdna->rpm_on = rpm;
+ }
+
+ XDNA_DBG(xdna, "Suspend done ret %d", ret);
+ return ret;
+}
+
+int amdxdna_pm_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+ bool rpm;
+
+ if (xdna->dev_info->ops->resume) {
+ rpm = xdna->rpm_on;
+ xdna->rpm_on = false;
+ ret = xdna->dev_info->ops->resume(xdna);
+ xdna->rpm_on = rpm;
+ }
+
+ XDNA_DBG(xdna, "Resume done ret %d", ret);
+ return ret;
+}
+
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+ int ret;
+
+ if (!xdna->rpm_on)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Resume failed: %d", ret);
+ pm_runtime_set_suspended(dev);
+ }
+
+ return ret;
+}
+
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ if (!xdna->rpm_on)
+ return;
+
+ pm_runtime_put_autosuspend(dev);
+}
+
+void amdxdna_pm_init(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_put_autosuspend(dev);
+ xdna->rpm_on = true;
+}
+
+void amdxdna_pm_fini(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ xdna->rpm_on = false;
+ pm_runtime_get_noresume(dev);
+ pm_runtime_forbid(dev);
+}
diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h
new file mode 100644
index 000000000000..77b2d6e45570
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PM_H_
+#define _AMDXDNA_PM_H_
+
+#include "amdxdna_pci_drv.h"
+
+int amdxdna_pm_suspend(struct device *dev);
+int amdxdna_pm_resume(struct device *dev);
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna);
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
+void amdxdna_pm_init(struct amdxdna_dev *xdna);
+void amdxdna_pm_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PM_H_ */
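A minimal sketch (not part of the patch) of the runtime-PM bracket these helpers provide, mirroring how the aie2 ioctl paths above use them; do_hw_work() is a hypothetical stand-in for any operation that needs the NPU awake:

	static int example_with_npu_awake(struct amdxdna_dev *xdna)
	{
		int ret;

		ret = amdxdna_pm_resume_get(xdna);	/* no-op until amdxdna_pm_init() sets rpm_on */
		if (ret)
			return ret;

		ret = do_hw_work(xdna);			/* hypothetical helper */

		amdxdna_pm_suspend_put(xdna);		/* re-arms the 5 s autosuspend timer */
		return ret;
	}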
diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c
index e4f6dac7d00f..10124cccb102 100644
--- a/drivers/accel/amdxdna/npu1_regs.c
+++ b/drivers/accel/amdxdna/npu1_regs.c
@@ -46,6 +46,7 @@
const struct rt_config npu1_default_rt_cfg[] = {
{ 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 4, 1, AIE2_RT_CFG_INIT }, /* Debug BO */
{ 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 0 },
};
diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c
index 9f2e33182ec6..e1da882420ec 100644
--- a/drivers/accel/amdxdna/npu4_regs.c
+++ b/drivers/accel/amdxdna/npu4_regs.c
@@ -63,6 +63,7 @@
const struct rt_config npu4_default_rt_cfg[] = {
{ 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 10, 1, AIE2_RT_CFG_INIT }, /* DEBUG BUF */
{ 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index cd24ccd20ba6..3bd85ee6c26b 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -398,35 +398,25 @@ static int dct_active_set(void *data, u64 active_percent)
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw,
+ int band, const char *name)
+{
+ seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n",
+ name,
+ hw->hws.grace_period[band],
+ hw->hws.process_grace_period[band],
+ hw->hws.process_quantum[band]);
+}
+
static int priority_bands_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
struct ivpu_hw_info *hw = vdev->hw;
- for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
- band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
- switch (band) {
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
- seq_puts(s, "Idle: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
- seq_puts(s, "Normal: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
- seq_puts(s, "Focus: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
- seq_puts(s, "Realtime: ");
- break;
- }
-
- seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
- hw->hws.grace_period[band], hw->hws.process_grace_period[band],
- hw->hws.process_quantum[band]);
- }
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime");
return 0;
}
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 3289751b4757..1792d0bbec71 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -200,6 +200,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_CAPABILITIES:
args->value = ivpu_is_capable(vdev, args->index);
break;
+ case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
+ args->value = ivpu_fw_preempt_buf_size(vdev);
+ break;
default:
ret = -EINVAL;
break;
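/*
 * Sketch (not part of the patch): userspace can read the new parameter through
 * the existing GET_PARAM ioctl; this assumes the drm_ivpu_param layout from the
 * uAPI header and reports the combined primary + secondary preemption buffer size.
 */
	struct drm_ivpu_param p = { .param = DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE };

	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &p) == 0)
		printf("combined preemption buffer size: %llu bytes\n",
		       (unsigned long long)p.value);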
@@ -377,8 +380,7 @@ int ivpu_boot(struct ivpu_device *vdev)
drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
- /* Update boot params located at first 4KB of FW memory */
- ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
+ ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp));
ret = ivpu_hw_boot_fw(vdev);
if (ret) {
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 9db741695401..be1290be77fd 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -17,15 +17,10 @@
#include "ivpu_ipc.h"
#include "ivpu_pm.h"
-#define FW_GLOBAL_MEM_START (2ull * SZ_1G)
-#define FW_GLOBAL_MEM_END (3ull * SZ_1G)
-#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
-#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */
-#define FW_RUNTIME_MAX_SIZE SZ_512M
#define FW_SHAVE_NN_MAX_SIZE SZ_2M
-#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
-#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
+#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K
+#define FW_PREEMPT_BUF_MAX_SIZE SZ_32M
#define WATCHDOG_MSS_REDIRECT 32
#define WATCHDOG_NCE_REDIRECT 33
@@ -131,9 +126,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea
return false;
}
-static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size)
+bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range)
{
- if (addr < range_start || addr + size > range_start + range_size)
+ u64 addr_end;
+
+ if (!range || check_add_overflow(addr, size, &addr_end))
+ return false;
+
+ if (addr < range->start || addr_end > range->end)
return false;
return true;
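/*
 * Sketch (not part of the patch): with check_add_overflow() the probe can no
 * longer wrap around, e.g. for a hypothetical runtime range [0x80000000, 0xC0000000):
 *
 *	ivpu_is_within_range(0x80000000, SZ_4K, &range)      -> true
 *	ivpu_is_within_range(0xBFFFF000, SZ_8K, &range)      -> false (crosses range->end)
 *	ivpu_is_within_range(U64_MAX - SZ_1K, SZ_4K, &range) -> false (addr + size overflows)
 */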
@@ -151,11 +151,56 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he
return VPU_SCHEDULING_MODE_HW;
}
+static void
+ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ u32 primary_preempt_buf_size, secondary_preempt_buf_size;
+
+ if (fw_hdr->preemption_buffer_1_max_size)
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
+ else
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+
+ if (fw_hdr->preemption_buffer_2_max_size)
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
+ else
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+
+ ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n",
+ primary_preempt_buf_size, secondary_preempt_buf_size);
+
+ if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE ||
+ secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) {
+ ivpu_warn(vdev, "Preemption buffers size too small\n");
+ return;
+ }
+
+ if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE ||
+ secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) {
+ ivpu_warn(vdev, "Preemption buffers size too big\n");
+ return;
+ }
+
+ if (fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ return;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
+ return;
+
+ vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE);
+ vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE);
+}
+
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
- u64 runtime_addr, image_load_addr, runtime_size, image_size;
+ struct ivpu_addr_range fw_image_range;
+ u64 boot_params_addr, boot_params_size;
+ u64 fw_version_addr, fw_version_size;
+ u64 runtime_addr, runtime_size;
+ u64 image_load_addr, image_size;
if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
@@ -167,18 +212,37 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
return -EINVAL;
}
- runtime_addr = fw_hdr->boot_params_load_address;
- runtime_size = fw_hdr->runtime_size;
- image_load_addr = fw_hdr->image_load_address;
- image_size = fw_hdr->image_size;
+ boot_params_addr = fw_hdr->boot_params_load_address;
+ boot_params_size = SZ_4K;
+
+ if (!ivpu_is_within_range(boot_params_addr, boot_params_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid boot params address: 0x%llx\n", boot_params_addr);
+ return -EINVAL;
+ }
+
+ fw_version_addr = fw_hdr->firmware_version_load_address;
+ fw_version_size = ALIGN(fw_hdr->firmware_version_size, SZ_4K);
- if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
- ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
+ if (fw_version_size != SZ_4K) {
+ ivpu_err(vdev, "Invalid firmware version size: %u\n",
+ fw_hdr->firmware_version_size);
return -EINVAL;
}
- if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
- ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
+ if (!ivpu_is_within_range(fw_version_addr, fw_version_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware version address: 0x%llx\n", fw_version_addr);
+ return -EINVAL;
+ }
+
+ runtime_addr = fw_hdr->image_load_address;
+ runtime_size = fw_hdr->runtime_size - boot_params_size - fw_version_size;
+
+ image_load_addr = fw_hdr->image_load_address;
+ image_size = fw_hdr->image_size;
+
+ if (!ivpu_is_within_range(runtime_addr, runtime_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx and size %llu\n",
+ runtime_addr, runtime_size);
return -EINVAL;
}
@@ -187,23 +251,25 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
return -EINVAL;
}
- if (image_load_addr < runtime_addr ||
- image_load_addr + image_size > runtime_addr + runtime_size) {
- ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n",
+ if (!ivpu_is_within_range(image_load_addr, image_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware load address: 0x%llx and size %llu\n",
image_load_addr, image_size);
return -EINVAL;
}
- if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
- ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ if (ivpu_hw_range_init(vdev, &fw_image_range, image_load_addr, image_size))
return -EINVAL;
- }
- if (fw_hdr->entry_point < image_load_addr ||
- fw_hdr->entry_point >= image_load_addr + image_size) {
+ if (!ivpu_is_within_range(fw_hdr->entry_point, SZ_4K, &fw_image_range)) {
ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
return -EINVAL;
}
+
+ if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
+ ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ return -EINVAL;
+ }
+
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
@@ -217,6 +283,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3))
return -EINVAL;
+ fw->boot_params_addr = boot_params_addr;
+ fw->boot_params_size = boot_params_size;
+ fw->fw_version_addr = fw_version_addr;
+ fw->fw_version_size = fw_version_size;
fw->runtime_addr = runtime_addr;
fw->runtime_size = runtime_size;
fw->image_load_offset = image_load_addr - runtime_addr;
@@ -235,22 +305,13 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
- if (fw_hdr->preemption_buffer_1_max_size)
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
- else
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+ ivpu_preemption_config_parse(vdev, fw_hdr);
+ ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n",
+ ivpu_fw_preempt_buf_size(vdev) ? "is" : "is not");
- if (fw_hdr->preemption_buffer_2_max_size)
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
- else
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
- ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n",
- fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size);
-
- if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
- fw_hdr->ro_section_size,
- fw_hdr->image_load_address,
- fw_hdr->image_size)) {
+ if (fw_hdr->ro_section_start_address &&
+ !ivpu_is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size,
+ &fw_image_range)) {
ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n",
fw_hdr->ro_section_start_address, fw_hdr->ro_section_size);
return -EINVAL;
@@ -259,12 +320,18 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->read_only_addr = fw_hdr->ro_section_start_address;
fw->read_only_size = fw_hdr->ro_section_size;
- ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
- fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
- ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
- fw->runtime_addr, image_load_addr, fw->entry_point);
+ ivpu_dbg(vdev, FW_BOOT, "Boot params: address 0x%llx, size %llu\n",
+ fw->boot_params_addr, fw->boot_params_size);
+ ivpu_dbg(vdev, FW_BOOT, "FW version: address 0x%llx, size %llu\n",
+ fw->fw_version_addr, fw->fw_version_size);
+ ivpu_dbg(vdev, FW_BOOT, "Runtime: address 0x%llx, size %u\n",
+ fw->runtime_addr, fw->runtime_size);
+ ivpu_dbg(vdev, FW_BOOT, "Image load offset: 0x%llx, size %u\n",
+ fw->image_load_offset, fw->image_size);
ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n",
fw->read_only_addr, fw->read_only_size);
+ ivpu_dbg(vdev, FW_BOOT, "FW entry point: 0x%llx\n", fw->entry_point);
+ ivpu_dbg(vdev, FW_BOOT, "SHAVE NN size: %u\n", fw->shave_nn_size);
return 0;
}
@@ -291,39 +358,33 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
IVPU_PRINT_WA(disable_d0i3_msg);
}
-static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
-{
- struct ivpu_fw_info *fw = vdev->fw;
- u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
- u64 size = FW_SHARED_MEM_SIZE;
-
- if (start + size > FW_GLOBAL_MEM_END) {
- ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
- return -EINVAL;
- }
-
- ivpu_hw_range_init(&vdev->hw->ranges.global, start, size);
- return 0;
-}
-
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
- struct ivpu_addr_range fw_range;
int log_verb_size;
int ret;
- ret = ivpu_fw_update_global_range(vdev);
- if (ret)
- return ret;
+ fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr, fw->boot_params_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!fw->mem_bp) {
+ ivpu_err(vdev, "Failed to create firmware boot params memory buffer\n");
+ return -ENOMEM;
+ }
+
+ fw->mem_fw_ver = ivpu_bo_create_runtime(vdev, fw->fw_version_addr, fw->fw_version_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!fw->mem_fw_ver) {
+ ivpu_err(vdev, "Failed to create firmware version memory buffer\n");
+ ret = -ENOMEM;
+ goto err_free_bp;
+ }
- fw_range.start = fw->runtime_addr;
- fw_range.end = fw->runtime_addr + fw->runtime_size;
- fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size,
- DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ fw->mem = ivpu_bo_create_runtime(vdev, fw->runtime_addr, fw->runtime_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem) {
ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_fw_ver;
}
ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr,
@@ -372,6 +433,10 @@ err_free_log_crit:
ivpu_bo_free(fw->mem_log_crit);
err_free_fw_mem:
ivpu_bo_free(fw->mem);
+err_free_fw_ver:
+ ivpu_bo_free(fw->mem_fw_ver);
+err_free_bp:
+ ivpu_bo_free(fw->mem_bp);
return ret;
}
@@ -387,10 +452,14 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
ivpu_bo_free(fw->mem_log_verb);
ivpu_bo_free(fw->mem_log_crit);
ivpu_bo_free(fw->mem);
+ ivpu_bo_free(fw->mem_fw_ver);
+ ivpu_bo_free(fw->mem_bp);
fw->mem_log_verb = NULL;
fw->mem_log_crit = NULL;
fw->mem = NULL;
+ fw->mem_fw_ver = NULL;
+ fw->mem_bp = NULL;
}
int ivpu_fw_init(struct ivpu_device *vdev)
@@ -483,11 +552,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
- boot_params->global_memory_allocator_base);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
- boot_params->global_memory_allocator_size);
-
ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
boot_params->shave_nn_fw_base);
@@ -495,10 +559,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->watchdog_irq_mss);
ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
boot_params->watchdog_irq_nce);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
- boot_params->host_to_vpu_irq);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
- boot_params->job_done_irq);
ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
boot_params->host_version_id);
@@ -546,6 +606,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->system_time_us);
ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
boot_params->power_profile);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_uses_ecc_mca_signal = 0x%x\n",
+ boot_params->vpu_uses_ecc_mca_signal);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
@@ -572,6 +634,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
return;
}
+ memset(boot_params, 0, sizeof(*boot_params));
vdev->pm->is_warmboot = false;
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
@@ -647,6 +710,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_entry_vpu_ts = 0;
if (IVPU_WA(disable_d0i2))
boot_params->power_profile |= BIT(1);
+ boot_params->vpu_uses_ecc_mca_signal =
+ ivpu_hw_uses_ecc_mca_signal(vdev) ? VPU_BOOT_MCA_ECC_BOTH : 0;
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 7081913fb0dd..00945892b55e 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_FW_H__
@@ -19,10 +19,16 @@ struct ivpu_fw_info {
const struct firmware *file;
const char *name;
char version[FW_VERSION_STR_SIZE];
+ struct ivpu_bo *mem_bp;
+ struct ivpu_bo *mem_fw_ver;
struct ivpu_bo *mem;
struct ivpu_bo *mem_shave_nn;
struct ivpu_bo *mem_log_crit;
struct ivpu_bo *mem_log_verb;
+ u64 boot_params_addr;
+ u64 boot_params_size;
+ u64 fw_version_addr;
+ u64 fw_version_size;
u64 runtime_addr;
u32 runtime_size;
u64 image_load_offset;
@@ -42,6 +48,7 @@ struct ivpu_fw_info {
u64 last_heartbeat;
};
+bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range);
int ivpu_fw_init(struct ivpu_device *vdev);
void ivpu_fw_fini(struct ivpu_device *vdev);
void ivpu_fw_load(struct ivpu_device *vdev);
@@ -52,4 +59,9 @@ static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point;
}
+static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev)
+{
+ return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size;
+}
+
#endif /* __IVPU_FW_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 59cfcf3eaded..e7277e02840a 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -15,6 +15,7 @@
#include <drm/drm_utils.h>
#include "ivpu_drv.h"
+#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
@@ -27,8 +28,8 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
ivpu_dbg(vdev, BO,
- "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
- action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
+ "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n",
+ action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
(bool)drm_gem_is_imported(&bo->base.base));
}
@@ -43,22 +44,47 @@ static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
dma_resv_unlock(bo->base.base.resv);
}
+static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo)
+{
+ struct sg_table *sgt;
+
+ drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach);
+
+ ivpu_bo_lock(bo);
+
+ sgt = bo->base.sgt;
+ if (!sgt) {
+ sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt));
+ else
+ bo->base.sgt = sgt;
+ }
+
+ ivpu_bo_unlock(bo);
+
+ return sgt;
+}
+
/*
- * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
+ * ivpu_bo_bind() - pin the backing physical pages and map them to VPU.
*
* This function pins physical memory pages, then maps the physical pages
* to IOMMU address space and finally updates the VPU MMU page tables
* to allow the VPU to translate VPU address to IOMMU address.
*/
-int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+int __must_check ivpu_bo_bind(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
int ret = 0;
- ivpu_dbg_bo(vdev, bo, "pin");
+ ivpu_dbg_bo(vdev, bo, "bind");
- sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (bo->base.base.import_attach)
+ sgt = ivpu_bo_map_attachment(vdev, bo);
+ else
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
@@ -99,7 +125,9 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
+ bo->ctx_id = ctx->id;
bo->vpu_addr = bo->mm_node.start;
+ ivpu_dbg_bo(vdev, bo, "vaddr");
} else {
ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
}
@@ -115,7 +143,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
+ dma_resv_assert_held(bo->base.base.resv);
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -134,9 +162,14 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
return;
if (bo->base.sgt) {
- dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
- sg_free_table(bo->base.sgt);
- kfree(bo->base.sgt);
+ if (bo->base.base.import_attach) {
+ dma_buf_unmap_attachment(bo->base.base.import_attach,
+ bo->base.sgt, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ kfree(bo->base.sgt);
+ }
bo->base.sgt = NULL;
}
}
@@ -182,10 +215,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
struct device *attach_dev = dev->dev;
struct dma_buf_attachment *attach;
- struct sg_table *sgt;
struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
int ret;
attach = dma_buf_attach(dma_buf, attach_dev);
@@ -194,25 +228,25 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto fail_detach;
- }
-
- obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
- goto fail_unmap;
+ goto fail_detach;
}
obj->import_attach = attach;
obj->resv = dma_buf->resv;
+ bo = to_ivpu_bo(obj);
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
+
+ ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo));
+
return obj;
-fail_unmap:
- dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
@@ -220,7 +254,7 @@ fail_detach:
return ERR_PTR(ret);
}
-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
struct ivpu_bo *bo;
@@ -238,7 +272,6 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
return ERR_CAST(shmem);
bo = to_ivpu_bo(&shmem->base);
- bo->ctx_id = ctx_id;
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
bo->flags = flags;
@@ -246,7 +279,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
- ivpu_dbg_bo(vdev, bo, "alloc");
+ ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size);
return bo;
}
@@ -281,6 +314,8 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
ivpu_dbg_bo(vdev, bo, "free");
+ drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node));
+
mutex_lock(&vdev->bo_list_lock);
list_del(&bo->bo_list_node);
mutex_unlock(&vdev->bo_list_lock);
@@ -290,11 +325,15 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
drm_WARN_ON(&vdev->drm, bo->base.vaddr);
+ ivpu_bo_lock(bo);
ivpu_bo_unbind_locked(bo);
+ ivpu_bo_unlock(bo);
+
drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
drm_WARN_ON(&vdev->drm, bo->ctx);
drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+ drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node);
drm_gem_shmem_free(&bo->base);
}
@@ -326,19 +365,23 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (size == 0)
return -EINVAL;
- bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
+ bo = ivpu_bo_alloc(vdev, size, args->flags);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0);
+
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
- else
+ } else {
args->vpu_addr = bo->vpu_addr;
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1);
+ }
drm_gem_object_put(&bo->base.base);
@@ -360,7 +403,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
- bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+ bo = ivpu_bo_alloc(vdev, size, flags);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
bo, range->start, size, flags);
@@ -371,7 +414,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
if (ret)
goto err_put;
- ret = ivpu_bo_pin(bo);
+ ret = ivpu_bo_bind(bo);
if (ret)
goto err_put;
@@ -391,6 +434,21 @@ err_put:
return NULL;
}
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags)
+{
+ struct ivpu_addr_range range;
+
+ if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size);
+ return NULL;
+ }
+
+ if (ivpu_hw_range_init(vdev, &range, addr, size))
+ return NULL;
+
+ return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags);
+}
+
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
{
return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);
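For orientation, a minimal sketch of how a caller might use the new ivpu_bo_create_runtime() helper introduced above; the fixed address, size, and error handling are placeholders for illustration and are not taken from this series:

	/* Hypothetical caller; the address and size are placeholders. */
	static int example_map_runtime_region(struct ivpu_device *vdev)
	{
		struct ivpu_bo *bo;

		/* The address must fall within vdev->hw->ranges.runtime, otherwise NULL is returned. */
		bo = ivpu_bo_create_runtime(vdev, 0x84800000, SZ_1M, DRM_IVPU_BO_WC);
		if (!bo)
			return -ENOMEM;

		/* ... hand bo->vpu_addr to the firmware ... */

		ivpu_bo_free(bo);
		return 0;
	}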
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index aa8ff14f7aae..54452eb8a41f 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_GEM_H__
#define __IVPU_GEM_H__
@@ -24,13 +24,14 @@ struct ivpu_bo {
bool mmu_mapped;
};
-int ivpu_bo_pin(struct ivpu_bo *bo);
+int ivpu_bo_bind(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
struct ivpu_addr_range *range, u64 size, u32 flags);
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
void ivpu_bo_free(struct ivpu_bo *bo);
@@ -96,4 +97,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
}
+static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_MAPPABLE;
+}
+
#endif /* __IVPU_GEM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 08dcc31b56f4..d69cd0d93569 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -8,6 +8,8 @@
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
+#include <asm/msr-index.h>
+#include <asm/msr.h>
#include <linux/dmi.h>
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
@@ -20,6 +22,10 @@ module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
#endif
+#define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */
+
+#define ECC_MCA_SIGNAL_ENABLE_MASK 0xff
+
static char *platform_to_str(u32 platform)
{
switch (platform) {
@@ -147,19 +153,39 @@ static void priority_bands_init(struct ivpu_device *vdev)
vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
}
+int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size)
+{
+ u64 end;
+
+ if (!range || check_add_overflow(start, size, &end)) {
+ ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size);
+ return -EINVAL;
+ }
+
+ range->start = start;
+ range->end = end;
+
+ return 0;
+}
+
static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
- ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
- ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
- ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G);
} else {
- ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G);
vdev->hw->ranges.dma = vdev->hw->ranges.user;
}
+
+ drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start,
+ FW_SHARED_MEM_ALIGNMENT));
}
static int wp_enable(struct ivpu_device *vdev)
@@ -373,3 +399,22 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
pm_runtime_mark_last_busy(vdev->drm.dev);
return IRQ_HANDLED;
}
+
+bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev)
+{
+ unsigned long long msr_integrity_caps;
+ int ret;
+
+ if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
+ return false;
+
+ ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps);
+ if (ret) {
+ ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d\n", ret);
+ return false;
+ }
+
+ ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps);
+
+ return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK;
+}
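A quick illustration of the overflow check now performed by ivpu_hw_range_init(); the values below are purely illustrative and not part of the patch:

	u64 end;
	bool wraps;

	wraps = check_add_overflow(0x84800000ULL, (u64)SZ_64M, &end);   /* false, end = 0x88800000 */
	wraps = check_add_overflow(U64_MAX - SZ_4K, (u64)SZ_64M, &end); /* true -> helper returns -EINVAL */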
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index d79668fe1609..b6d0f0d0dccc 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -21,6 +21,7 @@ struct ivpu_hw_info {
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
} irq;
struct {
+ struct ivpu_addr_range runtime;
struct ivpu_addr_range global;
struct ivpu_addr_range user;
struct ivpu_addr_range shave;
@@ -51,6 +52,8 @@ struct ivpu_hw_info {
};
int ivpu_hw_init(struct ivpu_device *vdev);
+int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start,
+ u64 size);
int ivpu_hw_power_up(struct ivpu_device *vdev);
int ivpu_hw_power_down(struct ivpu_device *vdev);
int ivpu_hw_reset(struct ivpu_device *vdev);
@@ -60,6 +63,7 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev);
void ivpu_hw_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_irq_disable(struct ivpu_device *vdev);
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr);
+bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev);
static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq)
{
@@ -71,12 +75,6 @@ static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq)
return vdev->hw->irq.ip_irq_handler(vdev, irq);
}
-static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size)
-{
- range->start = start;
- range->end = start + size;
-}
-
static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
{
return range->end - range->start;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index afdb3b2aa72a..aa33f562d29c 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -752,7 +752,7 @@ int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
}
}
-void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent)
{
u32 val = 0;
u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index 032c384ac3d4..c4c10e22f30f 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -36,7 +36,7 @@ u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
-void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent);
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 060f1fc031d3..ba4535a75aa7 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -34,22 +34,20 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
- u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
-
- if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
- ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
+ if (ivpu_fw_preempt_buf_size(vdev) == 0)
return 0;
cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
- primary_size, DRM_IVPU_BO_WC);
+ vdev->fw->primary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
if (!cmdq->primary_preempt_buf) {
ivpu_err(vdev, "Failed to create primary preemption buffer\n");
return -ENOMEM;
}
cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
- secondary_size, DRM_IVPU_BO_WC);
+ vdev->fw->secondary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
if (!cmdq->secondary_preempt_buf) {
ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
goto err_free_primary;
@@ -66,20 +64,39 @@ err_free_primary:
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
- return;
-
if (cmdq->primary_preempt_buf)
ivpu_bo_free(cmdq->primary_preempt_buf);
if (cmdq->secondary_preempt_buf)
ivpu_bo_free(cmdq->secondary_preempt_buf);
}
+static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv,
+ struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+{
+ int ret;
+
+ /* Use the preemption buffer provided by user space */
+ if (job->primary_preempt_buf)
+ return 0;
+
+ if (!cmdq->primary_preempt_buf) {
+ /* Allocate per command queue preemption buffers */
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+ return ret;
+ }
+
+ /* Use preemption buffers allocated by the kernel */
+ job->primary_preempt_buf = cmdq->primary_preempt_buf;
+ job->secondary_preempt_buf = cmdq->secondary_preempt_buf;
+
+ return 0;
+}
+
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq;
- int ret;
cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq->mem)
goto err_free_cmdq;
- ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
- if (ret)
- ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
-
return cmdq;
err_free_cmdq:
@@ -219,11 +232,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
- if (!ret)
+ if (!ret) {
ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
- else
+ } else {
xa_erase(&vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+ }
return ret;
}
@@ -427,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
- if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- if (cmdq->primary_preempt_buf) {
- entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
- entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
- }
+ if (job->primary_preempt_buf) {
+ entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr;
+ entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf);
+ }
- if (cmdq->secondary_preempt_buf) {
- entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
- entry->secondary_preempt_buf_size =
- ivpu_bo_size(cmdq->secondary_preempt_buf);
- }
+ if (job->secondary_preempt_buf) {
+ entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr;
+ entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf);
}
wmb(); /* Ensure that tail is updated after filling entry */
@@ -552,21 +564,26 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
return job;
}
-static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
- struct ivpu_job *job;
-
lockdep_assert_held(&vdev->submitted_jobs_lock);
- job = xa_load(&vdev->submitted_jobs_xa, job_id);
- if (!job)
- return -ENOENT;
+ switch (job_status) {
+ case VPU_JSM_STATUS_PROCESSING_ERR:
+ case VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN ... VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX:
+ {
+ struct ivpu_job *job = xa_load(&vdev->submitted_jobs_xa, job_id);
+
+ if (!job)
+ return false;
- if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
+ /* Trigger an engine reset */
guard(mutex)(&job->file_priv->lock);
+ job->job_status = job_status;
+
if (job->file_priv->has_mmu_faults)
- return 0;
+ return false;
/*
* Mark context as faulty and defer destruction of the job to jobs abort thread
@@ -575,22 +592,42 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
*/
job->file_priv->has_mmu_faults = true;
queue_work(system_wq, &vdev->context_abort_work);
- return 0;
+ return true;
}
+ default:
+ /* Complete job with error status, engine reset not required */
+ break;
+ }
+
+ return false;
+}
+
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+{
+ struct ivpu_job *job;
+
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
- job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
+ job = xa_load(&vdev->submitted_jobs_xa, job_id);
if (!job)
return -ENOENT;
- if (job->file_priv->has_mmu_faults)
- job_status = DRM_IVPU_JOB_STATUS_ABORTED;
+ ivpu_job_remove_from_submitted_jobs(vdev, job_id);
- job->bos[CMD_BUF_IDX]->job_status = job_status;
+ if (job->job_status == VPU_JSM_STATUS_SUCCESS) {
+ if (job->file_priv->has_mmu_faults)
+ job->job_status = DRM_IVPU_JOB_STATUS_ABORTED;
+ else
+ job->job_status = job_status;
+ }
+
+ job->bos[CMD_BUF_IDX]->job_status = job->job_status;
dma_fence_signal(job->done_fence);
trace_job("done", job);
ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n",
- job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status);
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx,
+ job->job_status);
ivpu_job_destroy(job);
ivpu_stop_job_timeout_detection(vdev);
@@ -661,6 +698,13 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
goto err_unlock;
}
+ ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n",
+ job->job_id, ret);
+ goto err_unlock;
+ }
+
job->cmdq_id = cmdq->id;
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
@@ -714,7 +758,7 @@ err_unlock:
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
- u32 buf_count, u32 commands_offset)
+ u32 buf_count, u32 commands_offset, u32 preempt_buffer_index)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -732,7 +776,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
job->bos[i] = to_ivpu_bo(obj);
- ret = ivpu_bo_pin(job->bos[i]);
+ ret = ivpu_bo_bind(job->bos[i]);
if (ret)
return ret;
}
@@ -750,6 +794,20 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
+ if (preempt_buffer_index) {
+ struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index];
+
+ if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) {
+ ivpu_warn(vdev, "Preemption buffer is too small\n");
+ return -EINVAL;
+ }
+ if (ivpu_bo_is_mappable(preempt_bo)) {
+ ivpu_warn(vdev, "Preemption buffer cannot be mappable\n");
+ return -EINVAL;
+ }
+ job->primary_preempt_buf = preempt_bo;
+ }
+
ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
&acquire_ctx);
if (ret) {
@@ -780,7 +838,7 @@ unlock_reservations:
static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
- u8 priority)
+ u32 preempt_buffer_index, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
@@ -812,7 +870,8 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
goto err_exit_dev;
}
- ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset,
+ preempt_buffer_index);
if (ret) {
ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
goto err_destroy_job;
@@ -866,7 +925,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
priority = ivpu_job_to_jsm_priority(args->priority);
return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
- (void __user *)args->buffers_ptr, args->commands_offset, priority);
+ (void __user *)args->buffers_ptr, args->commands_offset, 0, priority);
}
int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -883,6 +942,9 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
return -EINVAL;
+ if (args->preempt_buffer_index >= args->buffer_count)
+ return -EINVAL;
+
if (!IS_ALIGNED(args->commands_offset, 8))
return -EINVAL;
@@ -893,7 +955,8 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
return -EBADFD;
return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
- (void __user *)args->buffers_ptr, args->commands_offset, 0);
+ (void __user *)args->buffers_ptr, args->commands_offset,
+ args->preempt_buffer_index, 0);
}
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -984,7 +1047,9 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
mutex_lock(&vdev->submitted_jobs_lock);
- ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+ if (!ivpu_job_handle_engine_error(vdev, payload->job_id, payload->job_status))
+ /* No engine error, complete the job normally */
+ ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
mutex_unlock(&vdev->submitted_jobs_lock);
}
@@ -1012,7 +1077,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
if (ivpu_jsm_reset_engine(vdev, 0))
- return;
+ goto runtime_put;
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
@@ -1036,7 +1101,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
goto runtime_put;
if (ivpu_jsm_hws_resume_engine(vdev, 0))
- return;
+ goto runtime_put;
/*
* In hardware scheduling mode NPU already has stopped processing jobs
* and won't send us any further notifications, thus we have to free job related resources
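A hedged sketch of how user space could exercise the new preempt_buffer_index field handled above; the uapi struct and ioctl names (struct drm_ivpu_cmdq_submit, DRM_IOCTL_IVPU_CMDQ_SUBMIT) are assumed here rather than shown by this diff, while the field names match the handler:

	/* Hypothetical user-space usage; handle values are placeholders. */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/ivpu_accel.h>

	static int submit_with_user_preempt_buf(int fd, uint32_t cmdq_id,
						uint32_t cmd_handle, uint32_t preempt_handle)
	{
		uint32_t handles[2] = { cmd_handle, preempt_handle };
		struct drm_ivpu_cmdq_submit args = {
			.cmdq_id = cmdq_id,
			.buffers_ptr = (uintptr_t)handles,
			.buffer_count = 2,
			.commands_offset = 0,
			/* Index 1 selects the preemption buffer; it must be large enough and not MAPPABLE. */
			.preempt_buffer_index = 1,
		};

		return ioctl(fd, DRM_IOCTL_IVPU_CMDQ_SUBMIT, &args);
	}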
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 2e301c2eea7b..3ab61e6a5616 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_JOB_H__
@@ -15,12 +15,17 @@ struct ivpu_device;
struct ivpu_file_priv;
/**
- * struct ivpu_cmdq - Object representing device queue used to send jobs.
- * @jobq: Pointer to job queue memory shared with the device
- * @mem: Memory allocated for the job queue, shared with device
- * @entry_count Number of job entries in the queue
- * @db_id: Doorbell assigned to this job queue
- * @db_registered: True if doorbell is registered in device
+ * struct ivpu_cmdq - Represents a command queue for submitting jobs to the VPU.
+ * Tracks queue memory, preemption buffers, and metadata for job management.
+ * @jobq: Pointer to job queue memory shared with the device
+ * @primary_preempt_buf: Primary preemption buffer for this queue (optional)
+ * @secondary_preempt_buf: Secondary preemption buffer for this queue (optional)
+ * @mem: Memory allocated for the job queue, shared with device
+ * @entry_count: Number of job entries in the queue
+ * @id: Unique command queue ID
+ * @db_id: Doorbell ID assigned to this job queue
+ * @priority: Priority level of the command queue
+ * @is_legacy: True if this is a legacy command queue
*/
struct ivpu_cmdq {
struct vpu_job_queue *jobq;
@@ -35,16 +40,22 @@ struct ivpu_cmdq {
};
/**
- * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer.
- * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW.
- * This is a unit of execution, and be tracked by the job_id for
- * any status reporting from VPU FW through IPC JOB RET/DONE message.
- * @file_priv: The client that submitted this job
- * @job_id: Job ID for KMD tracking and job status reporting from VPU FW
- * @status: Status of the Job from IPC JOB RET/DONE message
- * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job
- * @submit_status_offset: Offset within batch buffer where job completion handler
- will update the job status
+ * struct ivpu_job - Represents a batch or DMA buffer submitted to the VPU.
+ * Each job is a unit of execution, tracked by job_id for status reporting from VPU FW.
+ * The structure holds all resources and metadata needed for job submission, execution,
+ * and completion handling.
+ * @vdev: Pointer to the VPU device
+ * @file_priv: The client context that submitted this job
+ * @done_fence: Fence signaled when job completes
+ * @cmd_buf_vpu_addr: VPU address of the command buffer for this job
+ * @cmdq_id: Command queue ID used for submission
+ * @job_id: Unique job ID for tracking and status reporting
+ * @engine_idx: Engine index for job execution
+ * @job_status: Status reported by firmware for this job
+ * @primary_preempt_buf: Primary preemption buffer for job
+ * @secondary_preempt_buf: Secondary preemption buffer for job (optional)
+ * @bo_count: Number of buffer objects associated with this job
+ * @bos: Array of buffer objects used by the job (batch buffer is at index 0)
*/
struct ivpu_job {
struct ivpu_device *vdev;
@@ -54,6 +65,9 @@ struct ivpu_job {
u32 cmdq_id;
u32 job_id;
u32 engine_idx;
+ u32 job_status;
+ struct ivpu_bo *primary_preempt_buf;
+ struct ivpu_bo *secondary_preempt_buf;
size_t bo_count;
struct ivpu_bo *bos[] __counted_by(bo_count);
};
@@ -71,6 +85,7 @@ void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status);
void ivpu_context_abort_work_fn(struct work_struct *work);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index f0267efa55aa..4ffc783426be 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -568,7 +568,7 @@ void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
mutex_init(&ctx->lock);
if (!context_id) {
- start = vdev->hw->ranges.global.start;
+ start = vdev->hw->ranges.runtime.start;
end = vdev->hw->ranges.shave.end;
} else {
start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 475ddc94f1cf..63c95307faa1 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -54,7 +54,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
- struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);
+ struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem_bp);
if (!bp->save_restore_ret_address) {
ivpu_pm_prepare_cold_boot(vdev);
@@ -502,6 +502,11 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
else
ret = ivpu_pm_dct_disable(vdev);
- if (!ret)
- ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent);
+ if (!ret) {
+ /* Convert percent to U1.7 format */
+ u8 val = DIV_ROUND_CLOSEST(vdev->pm->dct_active_percent * 128, 100);
+
+ ivpu_hw_btrs_dct_set_status(vdev, enable, val);
+ }
+
}
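For clarity, the U1.7 conversion above scales 0-100% onto 0-128 with rounding; a minimal mirror of the in-driver expression with a few worked values (illustrative only):

	/* Mirrors DIV_ROUND_CLOSEST(percent * 128, 100) used above. */
	static inline u8 percent_to_u1_7(u8 percent)
	{
		return DIV_ROUND_CLOSEST(percent * 128, 100); /* 30 -> 38, 75 -> 96, 100 -> 128 */
	}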
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 4b6b2b3d2583..bca6a44dc041 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -1,15 +1,16 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2020-2024, Intel Corporation.
+ * Copyright (c) 2020-2025, Intel Corporation.
+ */
+
+/**
+ * @addtogroup Jsm
+ * @{
*/
/**
* @file
* @brief JSM shared definitions
- *
- * @ingroup Jsm
- * @brief JSM shared definitions
- * @{
*/
#ifndef VPU_JSM_API_H
#define VPU_JSM_API_H
@@ -22,7 +23,7 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 29
+#define VPU_JSM_API_VER_MINOR 33
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -71,9 +72,15 @@
#define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU
#define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU
#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
-/* Job status returned when the job was preempted mid-inference */
+/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */
#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+/* Job status returned when the job was preempted mid-command */
+#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU
+/* Range of status codes that require engine reset */
+#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN 0xEU
#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU
+#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU
+#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX 0x1FU
/*
* Host <-> VPU IPC channels.
@@ -134,11 +141,21 @@ enum {
* 2. Native fence queues are only supported on VPU 40xx onwards.
*/
VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
-
/*
* Enable turbo mode for testing NPU performance; not recommended for regular usage.
*/
- VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U)
+ VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U),
+ /*
+ * Queue error detection mode flag.
+ * For 'interactive' queues (this bit not set), the FW will mark as in error, as part
+ * of the engine reset sequence, any queue that has not completed a job within the
+ * TDR timeout.
+ * For 'non-interactive' queues (this bit set), the FW will mark as in error, as part
+ * of the engine reset sequence, any queue that has not progressed its heartbeat within
+ * the non-interactive no-progress timeout. Additionally, an upper limit applies to
+ * these queues: even if the heartbeat progresses, a queue that runs longer than the
+ * non-interactive timeout will also be marked as in error.
+ */
+ VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U)
};
/*
@@ -209,7 +226,7 @@ enum {
*/
#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2
-/*
+/**
* Job scheduling priority bands for both hardware scheduling and OS scheduling.
*/
enum vpu_job_scheduling_priority_band {
@@ -220,16 +237,16 @@ enum vpu_job_scheduling_priority_band {
VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
};
-/*
+/**
* Job format.
* Jobs defines the actual workloads to be executed by a given engine.
*/
struct vpu_job_queue_entry {
- /**< Address of VPU commands batch buffer */
+ /** Address of VPU commands batch buffer */
u64 batch_buf_addr;
- /**< Job ID */
+ /** Job ID */
u32 job_id;
- /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+ /** Flags bit field, see VPU_JOB_FLAGS_* above */
u32 flags;
/**
* Doorbell ring timestamp taken by KMD from SoC's global system clock, in
@@ -237,20 +254,20 @@ struct vpu_job_queue_entry {
* to match other profiling timestamps.
*/
u64 doorbell_timestamp;
- /**< Extra id for job tracking, used only in the firmware perf traces */
+ /** Extra id for job tracking, used only in the firmware perf traces */
u64 host_tracking_id;
- /**< Address of the primary preemption buffer to use for this job */
+ /** Address of the primary preemption buffer to use for this job */
u64 primary_preempt_buf_addr;
- /**< Size of the primary preemption buffer to use for this job */
+ /** Size of the primary preemption buffer to use for this job */
u32 primary_preempt_buf_size;
- /**< Size of secondary preemption buffer to use for this job */
+ /** Size of secondary preemption buffer to use for this job */
u32 secondary_preempt_buf_size;
- /**< Address of secondary preemption buffer to use for this job */
+ /** Address of secondary preemption buffer to use for this job */
u64 secondary_preempt_buf_addr;
u64 reserved_0;
};
-/*
+/**
* Inline command format.
* Inline commands are the commands executed at scheduler level (typically,
* synchronization directives). Inline command and job objects must be of
@@ -258,34 +275,36 @@ struct vpu_job_queue_entry {
*/
struct vpu_inline_cmd {
u64 reserved_0;
- /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
+ /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
u32 type;
- /* Flags bit field, see VPU_JOB_FLAGS_* above. */
+ /** Flags bit field, see VPU_JOB_FLAGS_* above. */
u32 flags;
- /* Inline command payload. Depends on inline command type. */
- union {
- /* Fence (wait and signal) commands' payload. */
- struct {
- /* Fence object handle. */
+ /** Inline command payload. Depends on inline command type. */
+ union payload {
+ /** Fence (wait and signal) commands' payload. */
+ struct fence {
+ /** Fence object handle. */
u64 fence_handle;
- /* User VA of the current fence value. */
+ /** User VA of the current fence value. */
u64 current_value_va;
- /* User VA of the monitored fence value (read-only). */
+ /** User VA of the monitored fence value (read-only). */
u64 monitored_value_va;
- /* Value to wait for or write in fence location. */
+ /** Value to wait for or write in fence location. */
u64 value;
- /* User VA of the log buffer in which to add log entry on completion. */
+ /** User VA of the log buffer in which to add log entry on completion. */
u64 log_buffer_va;
- /* NPU private data. */
+ /** NPU private data. */
u64 npu_private_data;
} fence;
- /* Other commands do not have a payload. */
- /* Payload definition for future inline commands can be inserted here. */
+ /**
+ * Other commands do not have a payload; payload definitions for future
+ * inline commands can be inserted here.
+ */
u64 reserved_1[6];
} payload;
};
-/*
+/**
* Job queue slots can be populated either with job objects or inline command objects.
*/
union vpu_jobq_slot {
@@ -293,7 +312,7 @@ union vpu_jobq_slot {
struct vpu_inline_cmd inline_cmd;
};
-/*
+/**
* Job queue control registers.
*/
struct vpu_job_queue_header {
@@ -301,18 +320,18 @@ struct vpu_job_queue_header {
u32 head;
u32 tail;
u32 flags;
- /* Set to 1 to indicate priority_band field is valid */
+ /** Set to 1 to indicate priority_band field is valid */
u32 priority_band_valid;
- /*
+ /**
* Priority for the work of this job queue, valid only if the HWS is NOT used
- * and the `priority_band_valid` is set to 1. It is applied only during
- * the VPU_JSM_MSG_REGISTER_DB message processing.
- * The device firmware might use the `priority_band` to optimize the power
+ * and the @ref priority_band_valid is set to 1. It is applied only during
+ * the @ref VPU_JSM_MSG_REGISTER_DB message processing.
+ * The device firmware might use the priority_band to optimize the power
* management logic, but it will not affect the order of jobs.
* Available priority bands: @see enum vpu_job_scheduling_priority_band
*/
u32 priority_band;
- /* Inside realtime band assigns a further priority, limited to 0..31 range */
+ /** Inside realtime band assigns a further priority, limited to 0..31 range */
u32 realtime_priority_level;
u32 reserved_0[9];
};
@@ -337,16 +356,16 @@ enum vpu_trace_entity_type {
VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2,
};
-/*
+/**
* HWS specific log buffer header details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_header {
- /* Written by VPU after adding a log entry. Initialised by host to 0. */
+ /** Written by VPU after adding a log entry. Initialised by host to 0. */
u32 first_free_entry_index;
- /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
+ /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
u32 wraparound_count;
- /*
+ /**
* This is the number of buffers that can be stored in the log buffer provided by the host.
* It is written by host before passing buffer to VPU. VPU should consider it read-only.
*/
@@ -354,14 +373,14 @@ struct vpu_hws_log_buffer_header {
u64 reserved[2];
};
-/*
+/**
* HWS specific log buffer entry details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_entry {
- /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
+ /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
u64 vpu_timestamp;
- /*
+ /**
* Operation type:
* 0 - context state change
* 1 - queue new work
@@ -371,7 +390,7 @@ struct vpu_hws_log_buffer_entry {
*/
u32 operation_type;
u32 reserved;
- /* Operation data depends on operation type */
+ /** Operation data depends on operation type */
u64 operation_data[2];
};
@@ -381,51 +400,54 @@ enum vpu_hws_native_fence_log_type {
VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
};
-/* HWS native fence log buffer header. */
+/** HWS native fence log buffer header. */
struct vpu_hws_native_fence_log_header {
union {
struct {
- /* Index of the first free entry in buffer. */
+ /** Index of the first free entry in buffer. */
u32 first_free_entry_idx;
- /* Incremented each time NPU wraps around the buffer to write next entry. */
+ /**
+ * Incremented whenever the NPU wraps around the buffer and writes
+ * to the first entry again.
+ */
u32 wraparound_count;
};
- /* Field allowing atomic update of both fields above. */
+ /** Field allowing atomic update of both fields above. */
u64 atomic_wraparound_and_entry_idx;
};
- /* Log buffer type, see enum vpu_hws_native_fence_log_type. */
+ /** Log buffer type, see enum vpu_hws_native_fence_log_type. */
u64 type;
- /* Allocated number of entries in the log buffer. */
+ /** Allocated number of entries in the log buffer. */
u64 entry_nb;
u64 reserved[2];
};
-/* Native fence log operation types. */
+/** Native fence log operation types. */
enum vpu_hws_native_fence_log_op {
VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
};
-/* HWS native fence log entry. */
+/** HWS native fence log entry. */
struct vpu_hws_native_fence_log_entry {
- /* Newly signaled/unblocked fence value. */
+ /** Newly signaled/unblocked fence value. */
u64 fence_value;
- /* Native fence object handle to which this operation belongs. */
+ /** Native fence object handle to which this operation belongs. */
u64 fence_handle;
- /* Operation type, see enum vpu_hws_native_fence_log_op. */
+ /** Operation type, see enum vpu_hws_native_fence_log_op. */
u64 op_type;
u64 reserved_0;
- /*
+ /**
* VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
* wait was started (in NPU SysTime).
*/
u64 fence_wait_start_ts;
u64 reserved_1;
- /* Timestamp at which fence operation was completed (in NPU SysTime). */
+ /** Timestamp at which fence operation was completed (in NPU SysTime). */
u64 fence_end_ts;
};
-/* Native fence log buffer. */
+/** Native fence log buffer. */
struct vpu_hws_native_fence_log_buffer {
struct vpu_hws_native_fence_log_header header;
struct vpu_hws_native_fence_log_entry entry[];
@@ -435,10 +457,17 @@ struct vpu_hws_native_fence_log_buffer {
* Host <-> VPU IPC messages types.
*/
enum vpu_ipc_msg_type {
+ /** Unsupported command */
VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
- /* IPC Host -> Device, Async commands */
+ /** IPC Host -> Device, base id for async commands */
VPU_JSM_MSG_ASYNC_CMD = 0x1100,
+ /**
+ * Reset engine. The NPU cancels all jobs currently executing on the target
+ * engine so that the engine becomes idle, then performs a HW reset before
+ * returning to the host.
+ * @see struct vpu_ipc_msg_payload_engine_reset
+ */
VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
/**
* Preempt engine. The NPU stops (preempts) all the jobs currently
@@ -448,10 +477,24 @@ enum vpu_ipc_msg_type {
* the target engine, but it stops processing them (until the queue doorbell
* is rung again); the host is responsible to reset the job queue, either
* after preemption or when resubmitting jobs to the queue.
+ * @see vpu_ipc_msg_payload_engine_preempt
*/
VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
+ /**
+ * OS scheduling doorbell register command
+ * @see vpu_ipc_msg_payload_register_db
+ */
VPU_JSM_MSG_REGISTER_DB = 0x1102,
+ /**
+ * OS scheduling doorbell unregister command
+ * @see vpu_ipc_msg_payload_unregister_db
+ */
VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
+ /**
+ * Query engine heartbeat. The heartbeat is expected to increase monotonically
+ * and to advance while work is being progressed by the NPU.
+ * @see vpu_ipc_msg_payload_query_engine_hb
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105,
VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106,
@@ -477,6 +520,7 @@ enum vpu_ipc_msg_type {
* aborted and removed from internal scheduling queues. All doorbells assigned
* to the host_ssid are unregistered and any internal FW resources belonging to
* the host_ssid are released.
+ * @see vpu_ipc_msg_payload_ssid_release
*/
VPU_JSM_MSG_SSID_RELEASE = 0x110e,
/**
@@ -504,43 +548,78 @@ enum vpu_ipc_msg_type {
* @see vpu_jsm_metric_streamer_start
*/
VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112,
- /** Control command: Priority band setup */
+ /**
+ * Control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113,
- /** Control command: Create command queue */
+ /**
+ * Control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114,
- /** Control command: Destroy command queue */
+ /**
+ * Control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115,
- /** Control command: Set context scheduling properties */
+ /**
+ * Control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116,
- /*
+ /**
* Register a doorbell to notify VPU of new work. The doorbell may later be
* deallocated or reassigned to another context.
+ * @see vpu_jsm_hws_register_db
*/
VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
- /** Control command: Log buffer setting */
+ /**
+ * Control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118,
- /* Control command: Suspend command queue. */
+ /**
+ * Control command: Suspend command queue.
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119,
- /* Control command: Resume command queue */
+ /**
+ * Control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a,
- /* Control command: Resume engine after reset */
+ /**
+ * Control command: Resume engine after reset
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b,
- /* Control command: Enable survivability/DCT mode */
+ /**
+ * Control command: Enable survivability/DCT mode
+ * @see vpu_ipc_msg_payload_pwr_dct_control
+ */
VPU_JSM_MSG_DCT_ENABLE = 0x111c,
- /* Control command: Disable survivability/DCT mode */
+ /**
+ * Control command: Disable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_DISABLE = 0x111d,
/**
* Dump VPU state. To be used for debug purposes only.
- * NOTE: Please introduce new ASYNC commands before this one. *
+ * This command has no payload.
+ * NOTE: Please introduce new ASYNC commands before this one.
*/
VPU_JSM_MSG_STATE_DUMP = 0x11FF,
- /* IPC Host -> Device, General commands */
+ /** IPC Host -> Device, base id for general commands */
VPU_JSM_MSG_GENERAL_CMD = 0x1200,
+ /** Unsupported command */
VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
/**
* Control dyndbg behavior by executing a dyndbg command; equivalent to
- * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
+ * Linux command:
+ * @verbatim echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control @endverbatim
+ * @see vpu_ipc_msg_payload_dyndbg_control
*/
VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
/**
@@ -548,17 +627,35 @@ enum vpu_ipc_msg_type {
*/
VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
- /* IPC Device -> Host, Job completion */
+ /**
+ * IPC Device -> Host, Job completion
+ * @see struct vpu_ipc_msg_payload_job_done
+ */
VPU_JSM_MSG_JOB_DONE = 0x2100,
- /* IPC Device -> Host, Fence signalled */
+ /**
+ * IPC Device -> Host, Fence signalled
+ * @see vpu_ipc_msg_payload_native_fence_signalled
+ */
VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,
/* IPC Device -> Host, Async command completion */
VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
+ /**
+ * IPC Device -> Host, engine reset complete
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
+ /**
+ * Preempt complete message
+ * @see vpu_ipc_msg_payload_engine_preempt_done
+ */
VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201,
VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202,
VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203,
+ /**
+ * Response to query engine heartbeat.
+ * @see vpu_ipc_msg_payload_query_engine_hb_done
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205,
VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206,
@@ -575,7 +672,10 @@ enum vpu_ipc_msg_type {
VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c,
/** Response to VPU_JSM_MSG_TRACE_GET_NAME. */
VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d,
- /** Response to VPU_JSM_MSG_SSID_RELEASE. */
+ /**
+ * Response to VPU_JSM_MSG_SSID_RELEASE.
+ * @see vpu_ipc_msg_payload_ssid_release
+ */
VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e,
/**
* Response to VPU_JSM_MSG_METRIC_STREAMER_START.
@@ -605,37 +705,71 @@ enum vpu_ipc_msg_type {
/**
* Asynchronous event sent from the VPU to the host either when the current
* metric buffer is full or when the VPU has collected a multiple of
- * @notify_sample_count samples as indicated through the start command
- * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected
- * metric data.
+ * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated
+ * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns
+ * information about collected metric data.
* @see vpu_jsm_metric_streamer_done
*/
VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213,
- /** Response to control command: Priority band setup */
+ /**
+ * Response to control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214,
- /** Response to control command: Create command queue */
+ /**
+ * Response to control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215,
- /** Response to control command: Destroy command queue */
+ /**
+ * Response to control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
- /** Response to control command: Set context scheduling properties */
+ /**
+ * Response to control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
- /** Response to control command: Log buffer setting */
+ /**
+ * Response to control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218,
- /* IPC Device -> Host, HWS notify index entry of log buffer written */
+ /**
+ * IPC Device -> Host, HWS notify index entry of log buffer written
+ * @see vpu_ipc_msg_payload_hws_scheduling_log_notification
+ */
VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219,
- /* IPC Device -> Host, HWS completion of a context suspend request */
+ /**
+ * IPC Device -> Host, HWS completion of a context suspend request
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a,
- /* Response to control command: Resume command queue */
+ /**
+ * Response to control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b,
- /* Response to control command: Resume engine command response */
+ /**
+ * Response to control command: Resume engine command response
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c,
- /* Response to control command: Enable survivability/DCT mode */
+ /**
+ * Response to control command: Enable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d,
- /* Response to control command: Disable survivability/DCT mode */
+ /**
+ * Response to control command: Disable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e,
/**
* Response to state dump control command.
- * NOTE: Please introduce new ASYNC responses before this one. *
+ * This command has no payload.
+ * NOTE: Please introduce new ASYNC responses before this one.
*/
VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
@@ -653,57 +787,66 @@ enum vpu_ipc_msg_type {
enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED };
-/*
- * Host <-> LRT IPC message payload definitions
+/**
+ * Engine reset request payload
+ * @see VPU_JSM_MSG_ENGINE_RESET
*/
struct vpu_ipc_msg_payload_engine_reset {
- /* Engine to be reset. */
+ /** Engine to be reset. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
+/**
+ * Engine preemption request struct
+ * @see VPU_JSM_MSG_ENGINE_PREEMPT
+ */
struct vpu_ipc_msg_payload_engine_preempt {
- /* Engine to be preempted. */
+ /** Engine to be preempted. */
u32 engine_idx;
- /* ID of the preemption request. */
+ /** ID of the preemption request. */
u32 preempt_id;
};
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
* This structure supports doorbell registration for only OS scheduling.
* @see VPU_JSM_MSG_REGISTER_DB
*/
struct vpu_ipc_msg_payload_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Virtual address in Global GTT pointing to the start of job queue. */
+ /** Virtual address in Global GTT pointing to the start of job queue. */
u64 jobq_base;
- /* Size of the job queue in bytes. */
+ /** Size of the job queue in bytes. */
u32 jobq_size;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
};
/**
- * @brief Unregister doorbell command structure.
+ * Unregister doorbell command structure.
* Request structure to unregister a doorbell for both HW and OS scheduling.
* @see VPU_JSM_MSG_UNREGISTER_DB
*/
struct vpu_ipc_msg_payload_unregister_db {
- /* Index of the doorbell to unregister. */
+ /** Index of the doorbell to unregister. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
+/**
+ * Heartbeat request structure
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB
+ */
struct vpu_ipc_msg_payload_query_engine_hb {
- /* Engine to return heartbeat value. */
+ /** Engine to return heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -723,10 +866,14 @@ struct vpu_ipc_msg_payload_power_level {
u32 reserved_0;
};
+/**
+ * Structure for requesting SSID release
+ * @see VPU_JSM_MSG_SSID_RELEASE
+ */
struct vpu_ipc_msg_payload_ssid_release {
- /* Host sub-stream ID for the context to be released. */
+ /** Host sub-stream ID for the context to be released. */
u32 host_ssid;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -752,7 +899,7 @@ struct vpu_jsm_metric_streamer_start {
u64 sampling_rate;
/**
* If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message
- * after every @notify_sample_count samples is collected or dropped by the VPU.
+ * after every @ref notify_sample_count samples is collected or dropped by the VPU.
* If set to UINT_MAX the VPU will only generate a notification when the metric
* buffer is full. If set to 0 the VPU will never generate a notification.
*/
@@ -762,9 +909,9 @@ struct vpu_jsm_metric_streamer_start {
* Address and size of the buffer where the VPU will write metric data. The
* VPU writes all counters from enabled metric groups one after another. If
* there is no space left to write data at the next sample period the VPU
- * will switch to the next buffer (@see next_buffer_addr) and will optionally
- * send a notification to the host driver if @notify_sample_count is non-zero.
- * If @next_buffer_addr is NULL the VPU will stop collecting metric data.
+ * will switch to the next buffer (@ref next_buffer_addr) and will optionally
+ * send a notification to the host driver if @ref notify_sample_count is non-zero.
+ * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data.
*/
u64 buffer_addr;
u64 buffer_size;
@@ -827,63 +974,80 @@ struct vpu_jsm_metric_streamer_update {
u64 next_buffer_size;
};
+/**
+ * Device -> host job completion message.
+ * @see VPU_JSM_MSG_JOB_DONE
+ */
struct vpu_ipc_msg_payload_job_done {
- /* Engine to which the job was submitted. */
+ /** Engine to which the job was submitted. */
u32 engine_idx;
- /* Index of the doorbell to which the job was submitted */
+ /** Index of the doorbell to which the job was submitted */
u32 db_idx;
- /* ID of the completed job */
+ /** ID of the completed job */
u32 job_id;
- /* Status of the completed job */
+ /** Status of the completed job */
u32 job_status;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/*
+/**
* Notification message upon native fence signalling.
* @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
*/
struct vpu_ipc_msg_payload_native_fence_signalled {
- /* Engine ID. */
+ /** Engine ID. */
u32 engine_idx;
- /* Host SSID. */
+ /** Host SSID. */
u32 host_ssid;
- /* CMDQ ID */
+ /** CMDQ ID */
u64 cmdq_id;
- /* Fence object handle. */
+ /** Fence object handle. */
u64 fence_handle;
};
+/**
+ * Element of the array carried in vpu_ipc_msg_payload_engine_reset_done, identifying
+ * the queues that caused the reset when the FW was able to detect an error.
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
struct vpu_jsm_engine_reset_context {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /* See VPU_ENGINE_RESET_CONTEXT_* defines */
+ /** See VPU_ENGINE_RESET_CONTEXT_* defines */
u64 flags;
};
+/**
+ * Engine reset response.
+ * @see VPU_JSM_MSG_ENGINE_RESET_DONE
+ */
struct vpu_ipc_msg_payload_engine_reset_done {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Number of impacted contexts */
+ /** Number of impacted contexts */
u32 num_impacted_contexts;
- /* Array of impacted command queue ids and their flags */
+ /** Array of impacted command queue ids and their flags */
struct vpu_jsm_engine_reset_context
impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS];
};
+/**
+ * Preemption response struct
+ * @see VPU_JSM_MSG_ENGINE_PREEMPT_DONE
+ */
struct vpu_ipc_msg_payload_engine_preempt_done {
- /* Engine preempted. */
+ /** Engine preempted. */
u32 engine_idx;
- /* ID of the preemption request. */
+ /** ID of the preemption request. */
u32 preempt_id;
};
@@ -912,12 +1076,16 @@ struct vpu_ipc_msg_payload_unregister_db_done {
u32 reserved_0;
};
+/**
+ * Structure for heartbeat response
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE
+ */
struct vpu_ipc_msg_payload_query_engine_hb_done {
- /* Engine returning heartbeat value. */
+ /** Engine returning heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Heartbeat value. */
+ /** Heartbeat value. */
u64 heartbeat;
};
@@ -937,7 +1105,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
u8 power_limit[16];
};
-/* HWS priority band setup request / response */
+/**
+ * HWS priority band setup request / response
+ * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP
+ */
struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
* Grace period in 100ns units when preempting another priority band for
@@ -964,15 +1135,23 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* TDR timeout value in milliseconds. Default value of 0 meaning no timeout.
*/
u32 tdr_timeout;
+ /* Non-interactive queue timeout in milliseconds when the heartbeat makes no progress.
+ * Default value of 0 meaning no timeout.
+ */
+ u32 non_interactive_no_progress_timeout;
+ /*
+ * Non-interactive queue upper limit timeout value in milliseconds. Default
+ * value of 0 meaning no timeout.
+ */
+ u32 non_interactive_timeout;
};
-/*
+/**
* @brief HWS create command queue request.
* Host will create a command queue via this command.
* Note: Cmdq group is a handle of an object which
* may contain one or more command queues.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq {
/* Process id */
@@ -993,66 +1172,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq {
u32 reserved_0;
};
-/*
- * @brief HWS create command queue response.
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
+/**
+ * HWS create command queue response.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
- /* Process id */
+ /** Process id */
u64 process_id;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Engine for which queue is being created */
+ /** Engine for which queue is being created */
u32 engine_idx;
- /* Command queue group */
+ /** Command queue group */
u64 cmdq_group;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS destroy command queue request / response */
+/**
+ * HWS destroy command queue request / response
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP
+ */
struct vpu_ipc_msg_payload_hws_destroy_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS set context scheduling properties request / response */
+/**
+ * HWS set context scheduling properties request / response
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP
+ */
struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /*
+ /**
* Priority band to assign to work of this context.
* Available priority bands: @see enum vpu_job_scheduling_priority_band
*/
u32 priority_band;
- /* Inside realtime band assigns a further priority */
+ /** Inside realtime band assigns a further priority */
u32 realtime_priority_level;
- /* Priority relative to other contexts in the same process */
+ /** Priority relative to other contexts in the same process */
s32 in_process_priority;
- /* Zero padding / Reserved */
+ /** Zero padding / Reserved */
u32 reserved_1;
- /*
+ /**
* Context quantum relative to other contexts of same priority in the same process
* Minimum value supported by NPU is 1ms (10000 in 100ns units).
*/
u64 context_quantum;
- /* Grace period when preempting context of the same priority within the same process */
+ /** Grace period when preempting context of the same priority within the same process */
u64 grace_period_same_priority;
- /* Grace period when preempting context of a lower priority within the same process */
+ /** Grace period when preempting context of a lower priority within the same process */
u64 grace_period_lower_priority;
};
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
* This structure supports doorbell registration for both HW and OS scheduling.
* Note: Queue base and size are added here so that the same structure can be used for
* OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored
@@ -1061,27 +1247,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
* @see VPU_JSM_MSG_HWS_REGISTER_DB
*/
struct vpu_jsm_hws_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_id;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
- /* ID of the command queue associated with the doorbell. */
+ /** ID of the command queue associated with the doorbell. */
u64 cmdq_id;
- /* Virtual address pointing to the start of command queue. */
+ /** Virtual address pointing to the start of command queue. */
u64 cmdq_base;
- /* Size of the command queue in bytes. */
+ /** Size of the command queue in bytes. */
u64 cmdq_size;
};
-/*
- * @brief Structure to set another buffer to be used for scheduling-related logging.
+/**
+ * Structure to set another buffer to be used for scheduling-related logging.
* The size of the logging buffer and the number of entries is defined as part of the
* buffer itself as described next.
* The log buffer received from the host is made up of;
- * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'.
+ * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header.
* The header contains the number of log entries in the buffer.
* - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in
- * 'struct vpu_hws_log_buffer_entry'.
+ * @ref vpu_hws_log_buffer_entry.
* The entry contains the VPU timestamp, operation type and data.
* The host should provide the notify index value of log buffer to VPU. This is a
* value defined within the log buffer and when written to will generate the
@@ -1095,30 +1281,30 @@ struct vpu_jsm_hws_register_db {
* @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
*/
struct vpu_ipc_msg_payload_hws_set_scheduling_log {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /*
+ /**
* VPU log buffer virtual address.
* Set to 0 to disable logging for this engine.
*/
u64 vpu_log_buffer_va;
- /*
+ /**
* Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
* is generated when an event log is written to this index.
*/
u64 notify_index;
- /*
+ /**
* Field is now deprecated, will be removed when KMD is updated to support removal
*/
u32 enable_extra_events;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
};
-/*
- * @brief The scheduling log notification is generated by VPU when it writes
+/**
+ * The scheduling log notification is generated by VPU when it writes
* an event into the log buffer at the notify_index. VPU notifies host with
* VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous
* message from VPU to host.
@@ -1126,14 +1312,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
* @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
*/
struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
};
-/*
- * @brief HWS suspend command queue request and done structure.
+/**
+ * HWS suspend command queue request and done structure.
* Host will request the suspend of contexts and VPU will;
* - Suspend all work on this context
* - Preempt any running work
@@ -1152,21 +1338,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
* @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
*/
struct vpu_ipc_msg_payload_hws_suspend_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /*
+ /**
* Suspend fence value - reported by the VPU suspend context
* completed once suspend is complete.
*/
u64 suspend_fence_value;
};
-/*
- * @brief HWS Resume command queue request / response structure.
+/**
+ * HWS Resume command queue request / response structure.
* Host will request the resume of a context;
* - VPU will resume all work on this context
* - Scheduler will allow this context to be scheduled
@@ -1174,25 +1360,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq {
* @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP
*/
struct vpu_ipc_msg_payload_hws_resume_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/*
- * @brief HWS Resume engine request / response structure.
- * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume.
+/**
+ * HWS Resume engine request / response structure.
+ * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume.
* Host shall send this command to resume scheduling of any valid queue.
- * @see VPU_JSM_MSG_HWS_RESUME_ENGINE
+ * @see VPU_JSM_MSG_HWS_ENGINE_RESUME
* @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE
*/
struct vpu_ipc_msg_payload_hws_resume_engine {
- /* Engine to be resumed */
+ /** Engine to be resumed */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -1326,7 +1512,7 @@ struct vpu_jsm_metric_streamer_done {
/**
* Metric group description placed in the metric buffer after successful completion
* of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
- * @vpu_jsm_metric_counter_descriptor records.
+ * @ref vpu_jsm_metric_counter_descriptor records.
* @see VPU_JSM_MSG_METRIC_STREAMER_INFO
*/
struct vpu_jsm_metric_group_descriptor {
@@ -1413,29 +1599,24 @@ struct vpu_jsm_metric_counter_descriptor {
};
/**
- * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests.
+ * Payload for @ref VPU_JSM_MSG_DYNDBG_CONTROL requests.
*
- * VPU_JSM_MSG_DYNDBG_CONTROL are used to control the VPU FW Dynamic Debug
- * feature, which allows developers to selectively enable / disable MVLOG_DEBUG
- * messages. This is equivalent to the Dynamic Debug functionality provided by
- * Linux
- * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html)
- * The host can control Dynamic Debug behavior by sending dyndbg commands, which
- * have the same syntax as Linux
- * dyndbg commands.
+ * VPU_JSM_MSG_DYNDBG_CONTROL requests are used to control the VPU FW dynamic debug
+ * feature, which allows developers to selectively enable/disable code to obtain
+ * additional FW information. This is equivalent to the dynamic debug functionality
+ * provided by Linux. The host can control dynamic debug behavior by sending dyndbg
+ * commands, using the same syntax as for Linux dynamic debug commands.
*
- * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host
- * still has to set the logging level to MVLOG_DEBUG, using the
- * VPU_JSM_MSG_TRACE_SET_CONFIG command.
+ * @see https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html.
*
- * The host can see the current dynamic debug configuration by executing a
- * special 'show' command. The dyndbg configuration will be printed to the
- * configured logging destination using MVLOG_INFO logging level.
+ * NOTE:
+ * As the dynamic debug feature uses MVLOG messages to provide information, the host
+ * must first set the logging level to MVLOG_DEBUG, using the @ref VPU_JSM_MSG_TRACE_SET_CONFIG
+ * command.
*/
struct vpu_ipc_msg_payload_dyndbg_control {
/**
- * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated
- * string.
+ * Dyndbg command to be executed.
*/
char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
};
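As a hedged host-side sketch, a payload for this message only needs the command string copied in; the file name below is purely illustrative and the command follows the Linux dynamic-debug syntax referenced above:

#include <linux/string.h>

static void example_fill_dyndbg(struct vpu_ipc_msg_payload_dyndbg_control *ctl)
{
	/* Enable debug prints for one (hypothetical) FW source file. */
	strscpy(ctl->dyndbg_cmd, "file vpu_scheduler.c +p", sizeof(ctl->dyndbg_cmd));
}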
@@ -1456,7 +1637,7 @@ struct vpu_ipc_msg_payload_pwr_d0i3_enter {
};
/**
- * Payload for VPU_JSM_MSG_DCT_ENABLE message.
+ * Payload for @ref VPU_JSM_MSG_DCT_ENABLE message.
*
* Default values for DCT active/inactive times are 5.3ms and 30ms respectively,
* corresponding to a 85% duty cycle. This payload allows the host to tune these
@@ -1513,28 +1694,28 @@ union vpu_ipc_msg_payload {
struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control;
};
-/*
- * Host <-> LRT IPC message base structure.
+/**
+ * Host <-> NPU IPC message base structure.
*
* NOTE: All instances of this object must be aligned on a 64B boundary
* to allow proper handling of VPU cache operations.
*/
struct vpu_jsm_msg {
- /* Reserved */
+ /** Reserved */
u64 reserved_0;
- /* Message type, see vpu_ipc_msg_type enum. */
+ /** Message type, see @ref vpu_ipc_msg_type. */
u32 type;
- /* Buffer status, see vpu_ipc_msg_status enum. */
+ /** Buffer status, see @ref vpu_ipc_msg_status. */
u32 status;
- /*
+ /**
* Request ID, provided by the host in a request message and passed
* back by VPU in the response message.
*/
u32 request_id;
- /* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
+ /** Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
u32 result;
u64 reserved_1;
- /* Message payload depending on message type, see vpu_ipc_msg_payload union. */
+ /** Message payload depending on message type, see vpu_ipc_msg_payload union. */
union vpu_ipc_msg_payload payload;
};
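Because every instance must sit on a 64-byte boundary for VPU cache maintenance, host-side storage is typically declared with an explicit alignment attribute; a minimal sketch, assuming nothing beyond the fields shown above:

/* Illustrative only: statically allocated, 64-byte aligned message slot. */
static struct vpu_jsm_msg example_msg __aligned(64);

static void example_fill_header(struct vpu_jsm_msg *msg, u32 type, u32 request_id)
{
	msg->type = type;		/* one of enum vpu_ipc_msg_type */
	msg->request_id = request_id;	/* echoed back by the VPU in the response */
}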
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index b86a8e48e731..9af8333db513 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -17,6 +17,7 @@
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
@@ -655,8 +656,9 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
return -EINVAL;
nelem = in_trans->queue_size;
- size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
- if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
+ if (check_mul_overflow((u32)(get_dbc_req_elem_size() + get_dbc_rsp_elem_size()),
+ nelem,
+ &size))
return -EINVAL;
if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
@@ -810,7 +812,7 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
}
if (ret)
- break;
+ goto out;
}
if (user_len != user_msg->len)
@@ -1079,7 +1081,6 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
list_for_each_entry(w, &wrappers->list, list) {
kref_get(&w->ref_count);
- retry_count = 0;
ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
if (ret) {
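The encode_activate() change above relies on check_mul_overflow() from <linux/overflow.h>, which stores the product in its third argument and returns true on overflow, replacing the open-coded divide-back test. A stand-alone sketch with made-up element sizes:

#include <linux/overflow.h>

static int example_queue_bytes(u32 elem_size, u32 nelem, u32 *out)
{
	u32 size;

	if (check_mul_overflow(elem_size, nelem, &size))
		return -EINVAL;	/* product would not fit in a u32 */

	*out = size;
	return 0;
}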
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index c4f117edb266..703ef0ce9da1 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -18,6 +18,7 @@
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
@@ -165,7 +166,7 @@ static void free_slice(struct kref *kref)
drm_gem_object_put(&slice->bo->base);
sg_free_table(slice->sgt);
kfree(slice->sgt);
- kfree(slice->reqs);
+ kvfree(slice->reqs);
kfree(slice);
}
@@ -404,7 +405,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
goto free_sgt;
}
- slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
+ slice->reqs = kvcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
if (!slice->reqs) {
ret = -ENOMEM;
goto free_slice;
@@ -430,7 +431,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
return 0;
free_req:
- kfree(slice->reqs);
+ kvfree(slice->reqs);
free_slice:
kfree(slice);
free_sgt:
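kvcalloc()/kvfree() behave like kcalloc()/kfree() but fall back to vmalloc() when the request is too large for the page allocator, which is why the slice request array above switches to the kv variants; a minimal sketch of the pairing, with a stand-in element type:

#include <linux/mm.h>
#include <linux/slab.h>

struct example_req { u64 addr; u32 len; };

static struct example_req *example_alloc_reqs(unsigned int nents)
{
	/* Backed by kmalloc or vmalloc depending on the size. */
	return kvcalloc(nents, sizeof(struct example_req), GFP_KERNEL);
}

static void example_free_reqs(struct example_req *reqs)
{
	kvfree(reqs);	/* correct for either backing allocator */
}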
@@ -643,8 +644,36 @@ static void qaic_free_object(struct drm_gem_object *obj)
kfree(bo);
}
+static struct sg_table *qaic_get_sg_table(struct drm_gem_object *obj)
+{
+ struct qaic_bo *bo = to_qaic_bo(obj);
+ struct scatterlist *sg, *sg_in;
+ struct sg_table *sgt, *sgt_in;
+ int i;
+
+ sgt_in = bo->sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(sgt, sgt_in->orig_nents, GFP_KERNEL)) {
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = sgt->sgl;
+ for_each_sgtable_sg(sgt_in, sg_in, i) {
+ memcpy(sg, sg_in, sizeof(*sg));
+ sg = sg_next(sg);
+ }
+
+ return sgt;
+}
+
static const struct drm_gem_object_funcs qaic_gem_funcs = {
.free = qaic_free_object,
+ .get_sg_table = qaic_get_sg_table,
.print_info = qaic_gem_print_info,
.mmap = qaic_gem_object_mmap,
.vm_ops = &drm_vm_ops,
@@ -953,8 +982,9 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
if (args->hdr.count == 0)
return -EINVAL;
- arg_size = args->hdr.count * sizeof(*slice_ent);
- if (arg_size / args->hdr.count != sizeof(*slice_ent))
+ if (check_mul_overflow((unsigned long)args->hdr.count,
+ (unsigned long)sizeof(*slice_ent),
+ &arg_size))
return -EINVAL;
if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
@@ -984,18 +1014,12 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
user_data = u64_to_user_ptr(args->data);
- slice_ent = kzalloc(arg_size, GFP_KERNEL);
- if (!slice_ent) {
- ret = -EINVAL;
+ slice_ent = memdup_user(user_data, arg_size);
+ if (IS_ERR(slice_ent)) {
+ ret = PTR_ERR(slice_ent);
goto unlock_dev_srcu;
}
- ret = copy_from_user(slice_ent, user_data, arg_size);
- if (ret) {
- ret = -EFAULT;
- goto free_slice_ent;
- }
-
obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
if (!obj) {
ret = -ENOENT;
@@ -1300,8 +1324,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
int usr_rcu_id, qdev_rcu_id;
struct qaic_device *qdev;
struct qaic_user *usr;
- u8 __user *user_data;
- unsigned long n;
u64 received_ts;
u32 queue_level;
u64 submit_ts;
@@ -1314,20 +1336,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
received_ts = ktime_get_ns();
size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
- n = (unsigned long)size * args->hdr.count;
- if (args->hdr.count == 0 || n / args->hdr.count != size)
+ if (args->hdr.count == 0)
return -EINVAL;
- user_data = u64_to_user_ptr(args->data);
-
- exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
- if (!exec)
- return -ENOMEM;
-
- if (copy_from_user(exec, user_data, n)) {
- ret = -EFAULT;
- goto free_exec;
- }
+ exec = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, size);
+ if (IS_ERR(exec))
+ return PTR_ERR(exec);
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1396,7 +1410,6 @@ unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
-free_exec:
kfree(exec);
return ret;
}
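memdup_array_user() overflow-checks count * size, allocates, and copies from user space in one call, returning an ERR_PTR on failure, which is what lets the kcalloc()/copy_from_user() pairs above collapse. A hedged sketch of the pattern with a hypothetical entry type:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_entry { u32 handle; u32 flags; };

static int example_copy_entries(u64 user_addr, u32 count)
{
	struct example_entry *ent;

	ent = memdup_array_user(u64_to_user_ptr(user_addr), count, sizeof(*ent));
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	/* ... validate and use ent[0..count - 1] ... */

	kfree(ent);
	return 0;
}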
@@ -1749,7 +1762,8 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
struct qaic_device *qdev;
struct qaic_user *usr;
struct qaic_bo *bo;
- int ret, i;
+ int ret = 0;
+ int i;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1770,18 +1784,12 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
goto unlock_dev_srcu;
}
- ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
- if (!ent) {
- ret = -EINVAL;
+ ent = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, sizeof(*ent));
+ if (IS_ERR(ent)) {
+ ret = PTR_ERR(ent);
goto unlock_dev_srcu;
}
- ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
- if (ret) {
- ret = -EFAULT;
- goto free_ent;
- }
-
for (i = 0; i < args->hdr.count; i++) {
obj = drm_gem_object_lookup(file_priv, ent[i].handle);
if (!obj) {
@@ -1789,6 +1797,16 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
goto free_ent;
}
bo = to_qaic_bo(obj);
+ if (!bo->sliced) {
+ drm_gem_object_put(obj);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+ if (bo->dbc->id != args->hdr.dbc_id) {
+ drm_gem_object_put(obj);
+ ret = -EINVAL;
+ goto free_ent;
+ }
/*
* perf stats ioctl is called before wait ioctl is complete then
* the latency information is invalid.
diff --git a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c
index 914ffc4a9970..f1d52a710136 100644
--- a/drivers/accel/qaic/qaic_ras.c
+++ b/drivers/accel/qaic/qaic_ras.c
@@ -514,21 +514,21 @@ static ssize_t ce_count_show(struct device *dev, struct device_attribute *attr,
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ce_count);
+ return sysfs_emit(buf, "%d\n", qdev->ce_count);
}
static ssize_t ue_count_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_count);
+ return sysfs_emit(buf, "%d\n", qdev->ue_count);
}
static ssize_t ue_nonfatal_count_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_nf_count);
+ return sysfs_emit(buf, "%d\n", qdev->ue_nf_count);
}
static DEVICE_ATTR_RO(ce_count);
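sysfs_emit() is the preferred formatter for sysfs show() callbacks: it knows the buffer is a full page and warns on misuse, so the explicit PAGE_SIZE bound in snprintf() goes away. A minimal sketch with a hypothetical counter:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_count_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int example_count = 0;	/* stand-in for a real per-device counter */

	return sysfs_emit(buf, "%d\n", example_count);
}
static DEVICE_ATTR_RO(example_count);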
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index 3ebcc1f7ff58..d5d16cd0d50d 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -159,6 +159,7 @@ struct sahara_context {
struct sahara_packet *rx;
struct work_struct fw_work;
struct work_struct dump_work;
+ struct work_struct read_data_work;
struct mhi_device *mhi_dev;
const char * const *image_table;
u32 table_size;
@@ -174,7 +175,10 @@ struct sahara_context {
u64 dump_image_offset;
void *mem_dump_freespace;
u64 dump_images_left;
+ u32 read_data_offset;
+ u32 read_data_length;
bool is_mem_dump_mode;
+ bool non_streaming;
};
static const char * const aic100_image_table[] = {
@@ -216,6 +220,11 @@ static const char * const aic200_image_table[] = {
[75] = "qcom/aic200/pvs.bin",
};
+static bool is_streaming(struct sahara_context *context)
+{
+ return !context->non_streaming;
+}
+
static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
int ret;
@@ -265,6 +274,8 @@ static void sahara_send_reset(struct sahara_context *context)
int ret;
context->is_mem_dump_mode = false;
+ context->read_data_offset = 0;
+ context->read_data_length = 0;
context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
@@ -319,9 +330,39 @@ static void sahara_hello(struct sahara_context *context)
dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
}
+static int read_data_helper(struct sahara_context *context, int buf_index)
+{
+ enum mhi_flags mhi_flag;
+ u32 pkt_data_len;
+ int ret;
+
+ pkt_data_len = min(context->read_data_length, SAHARA_PACKET_MAX_SIZE);
+
+ memcpy(context->tx[buf_index],
+ &context->firmware->data[context->read_data_offset],
+ pkt_data_len);
+
+ context->read_data_offset += pkt_data_len;
+ context->read_data_length -= pkt_data_len;
+
+ if (is_streaming(context) || !context->read_data_length)
+ mhi_flag = MHI_EOT;
+ else
+ mhi_flag = MHI_CHAIN;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
+ context->tx[buf_index], pkt_data_len, mhi_flag);
+ if (ret) {
+ dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void sahara_read_data(struct sahara_context *context)
{
- u32 image_id, data_offset, data_len, pkt_data_len;
+ u32 image_id, data_offset, data_len;
int ret;
int i;
@@ -357,7 +398,7 @@ static void sahara_read_data(struct sahara_context *context)
* and is not needed here on error.
*/
- if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
+ if (context->non_streaming && data_len > SAHARA_TRANSFER_MAX_SIZE) {
dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
data_len, SAHARA_TRANSFER_MAX_SIZE);
sahara_send_reset(context);
@@ -378,22 +419,18 @@ static void sahara_read_data(struct sahara_context *context)
return;
}
- for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
- pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);
-
- memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);
+ context->read_data_offset = data_offset;
+ context->read_data_length = data_len;
- data_offset += pkt_data_len;
- data_len -= pkt_data_len;
+ if (is_streaming(context)) {
+ schedule_work(&context->read_data_work);
+ return;
+ }
- ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
- context->tx[i], pkt_data_len,
- !data_len ? MHI_EOT : MHI_CHAIN);
- if (ret) {
- dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
- ret);
- return;
- }
+ for (i = 0; i < SAHARA_NUM_TX_BUF && context->read_data_length; ++i) {
+ ret = read_data_helper(context, i);
+ if (ret)
+ break;
}
}
@@ -538,6 +575,7 @@ static void sahara_parse_dump_table(struct sahara_context *context)
struct sahara_memory_dump_meta_v1 *dump_meta;
u64 table_nents;
u64 dump_length;
+ u64 mul_bytes;
int ret;
u64 i;
@@ -551,8 +589,9 @@ static void sahara_parse_dump_table(struct sahara_context *context)
dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
- dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length));
- if (dump_length == SIZE_MAX) {
+ if (check_add_overflow(dump_length,
+ le64_to_cpu(dev_table[i].length),
+ &dump_length)) {
/* Discard the dump */
sahara_send_reset(context);
return;
@@ -568,14 +607,17 @@ static void sahara_parse_dump_table(struct sahara_context *context)
dev_table[i].filename);
}
- dump_length = size_add(dump_length, sizeof(*dump_meta));
- if (dump_length == SIZE_MAX) {
+ if (check_add_overflow(dump_length, (u64)sizeof(*dump_meta), &dump_length)) {
/* Discard the dump */
sahara_send_reset(context);
return;
}
- dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents));
- if (dump_length == SIZE_MAX) {
+ if (check_mul_overflow((u64)sizeof(*image_out_table), table_nents, &mul_bytes)) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+ if (check_add_overflow(dump_length, mul_bytes, &dump_length)) {
/* Discard the dump */
sahara_send_reset(context);
return;
@@ -615,7 +657,7 @@ static void sahara_parse_dump_table(struct sahara_context *context)
/* Request the first chunk of the first image */
context->dump_image = &image_out_table[0];
- dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE);
+ dump_length = min_t(u64, context->dump_image->length, SAHARA_READ_MAX_SIZE);
/* Avoid requesting EOI sized data so that we can identify errors */
if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
@@ -663,7 +705,7 @@ static void sahara_parse_dump_image(struct sahara_context *context)
/* Get next image chunk */
dump_length = context->dump_image->length - context->dump_image_offset;
- dump_length = min(dump_length, SAHARA_READ_MAX_SIZE);
+ dump_length = min_t(u64, dump_length, SAHARA_READ_MAX_SIZE);
/* Avoid requesting EOI sized data so that we can identify errors */
if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
@@ -742,6 +784,13 @@ error:
sahara_send_reset(context);
}
+static void sahara_read_data_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, read_data_work);
+
+ read_data_helper(context, 0);
+}
+
static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct sahara_context *context;
@@ -756,34 +805,56 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_
if (!context->rx)
return -ENOMEM;
+ if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
+ context->image_table = aic200_image_table;
+ context->table_size = ARRAY_SIZE(aic200_image_table);
+ } else {
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ context->non_streaming = true;
+ }
+
/*
- * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
- * will request for READ_DATA. This is larger than
- * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
- * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
- * READ_DATA, it requires a transfer of the exact size requested. We
- * can use MHI_CHAIN to link multiple buffers into a single transfer
- * but the remote side will not consume the buffers until it sees an
- * EOT, thus we need to allocate enough buffers to put in the tx fifo
- * to cover an entire READ_DATA request of the max size.
+ * There are two firmware implementations for READ_DATA handling.
+ * The older "SBL" implementation defines a Sahara transfer size, and
+ * expects that the response is a single transport transfer. If the
+ * FW wants to transfer a file that is larger than the transfer size,
+ * the FW will issue multiple READ_DATA commands. For this
+ * implementation, we need to allocate enough buffers to contain the
+ * entire Sahara transfer size.
+ *
+ * The newer "XBL" implementation does not define a maximum transfer
+ * size and instead expects the data to be streamed over using the
+ * transport level MTU. The FW will issue a single READ_DATA command
+ * of whatever size, and consume multiple transport level transfers
+ * until the expected amount of data is consumed. For this
+ * implementation we only need a single buffer of the transport MTU
+ * but we'll need to be able to use it multiple times for a single
+ * READ_DATA request.
+ *
+ * AIC100 is the SBL implementation; it defines SAHARA_TRANSFER_MAX_SIZE,
+ * and we need 9x SAHARA_PACKET_MAX_SIZE to cover that. We can use
+ * MHI_CHAIN to link multiple buffers into a single transfer but the
+ * remote side will not consume the buffers until it sees an EOT, thus
+ * we need to allocate enough buffers to put in the tx fifo to cover an
+ * entire READ_DATA request of the max size.
+ *
+ * AIC200 is the XBL implementation, and so a single buffer will work.
*/
for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
- context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ context->tx[i] = devm_kzalloc(&mhi_dev->dev,
+ SAHARA_PACKET_MAX_SIZE,
+ GFP_KERNEL);
if (!context->tx[i])
return -ENOMEM;
+ if (is_streaming(context))
+ break;
}
context->mhi_dev = mhi_dev;
INIT_WORK(&context->fw_work, sahara_processing);
INIT_WORK(&context->dump_work, sahara_dump_processing);
-
- if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
- context->image_table = aic200_image_table;
- context->table_size = ARRAY_SIZE(aic200_image_table);
- } else {
- context->image_table = aic100_image_table;
- context->table_size = ARRAY_SIZE(aic100_image_table);
- }
+ INIT_WORK(&context->read_data_work, sahara_read_data_processing);
context->active_image_id = SAHARA_IMAGE_ID_NONE;
dev_set_drvdata(&mhi_dev->dev, context);
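The comment above boils down to a simple sizing rule: the SBL (non-streaming) path must pre-allocate enough packet-sized buffers to chain one full transfer, while the XBL (streaming) path reuses a single buffer. A hedged sketch of that rule; the driver hard-codes SAHARA_NUM_TX_BUF rather than computing it at run time:

#include <linux/kernel.h>

static unsigned int example_tx_buf_count(bool non_streaming, u32 transfer_max, u32 packet_max)
{
	if (!non_streaming)
		return 1;	/* XBL/streaming: one MTU-sized buffer, reused per chunk */

	/* SBL: enough chained buffers to cover one full READ_DATA transfer. */
	return DIV_ROUND_UP(transfer_max, packet_max);
}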
@@ -814,6 +885,10 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev)
static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ if (!mhi_result->transaction_status && context->read_data_length && is_streaming(context))
+ schedule_work(&context->read_data_work);
}
static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index bb369b38b001..a5eef06c4226 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -12,13 +12,3 @@ config DMABUF_HEAPS_CMA
Choose this option to enable dma-buf CMA heap. This heap is backed
by the Contiguous Memory Allocator (CMA). If your system has these
regions, you should say Y here.
-
-config DMABUF_HEAPS_CMA_LEGACY
- bool "Legacy DMA-BUF CMA Heap"
- default y
- depends on DMABUF_HEAPS_CMA
- help
- Add a duplicate CMA-backed dma-buf heap with legacy naming derived
- from the CMA area's devicetree node, or "reserved" if the area is not
- defined in the devicetree. This uses the same underlying allocator as
- CONFIG_DMABUF_HEAPS_CMA.
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 0df007111975..42f88193eab9 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -14,6 +14,7 @@
#include <linux/cma.h>
#include <linux/dma-buf.h>
+#include <linux/dma-buf/heaps/cma.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
@@ -21,12 +22,27 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#define DEFAULT_CMA_NAME "default_cma_region"
+static struct cma *dma_areas[MAX_CMA_AREAS] __initdata;
+static unsigned int dma_areas_num __initdata;
+
+int __init dma_heap_cma_register_heap(struct cma *cma)
+{
+ if (dma_areas_num >= ARRAY_SIZE(dma_areas))
+ return -EINVAL;
+
+ dma_areas[dma_areas_num++] = cma;
+
+ return 0;
+}
+
struct cma_heap {
struct dma_heap *heap;
struct cma *cma;
@@ -395,33 +411,30 @@ static int __init __add_cma_heap(struct cma *cma, const char *name)
return 0;
}
-static int __init add_default_cma_heap(void)
+static int __init add_cma_heaps(void)
{
struct cma *default_cma = dev_get_cma_area(NULL);
- const char *legacy_cma_name;
+ unsigned int i;
int ret;
- if (!default_cma)
- return 0;
+ if (default_cma) {
+ ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
+ if (ret)
+ return ret;
+ }
- ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
- if (ret)
- return ret;
+ for (i = 0; i < dma_areas_num; i++) {
+ struct cma *cma = dma_areas[i];
- if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) {
- legacy_cma_name = cma_get_name(default_cma);
- if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) {
- pr_warn("legacy name and default name are the same, skipping legacy heap\n");
- return 0;
+ ret = __add_cma_heap(cma, cma_get_name(cma));
+ if (ret) {
+ pr_warn("Failed to add CMA heap %s", cma_get_name(cma));
+ continue;
}
- ret = __add_cma_heap(default_cma, legacy_cma_name);
- if (ret)
- pr_warn("failed to add legacy heap: %pe\n",
- ERR_PTR(ret));
}
return 0;
}
-module_init(add_default_cma_heap);
+module_init(add_cma_heaps);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4b2f7d794275..c2672f369aed 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -150,7 +150,8 @@ drm_kms_helper-y := \
drm_plane_helper.o \
drm_probe_helper.o \
drm_self_refresh_helper.o \
- drm_simple_kms_helper.o
+ drm_simple_kms_helper.o \
+ drm_vblank_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3d032c4e2dce..aa3736de238d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5219,7 +5219,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
dev_warn(adev->dev, "smart shift update failed\n");
if (notify_clients)
- drm_client_dev_suspend(adev_to_drm(adev), false);
+ drm_client_dev_suspend(adev_to_drm(adev));
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -5353,7 +5353,7 @@ exit:
flush_delayed_work(&adev->delayed_init_work);
if (notify_clients)
- drm_client_dev_resume(adev_to_drm(adev), false);
+ drm_client_dev_resume(adev_to_drm(adev));
amdgpu_ras_resume(adev);
@@ -5958,7 +5958,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
if (r)
goto out;
- drm_client_dev_resume(adev_to_drm(tmp_adev), false);
+ drm_client_dev_resume(adev_to_drm(tmp_adev));
/*
* The GPU enters bad state once faulty pages
@@ -6293,7 +6293,7 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
+ drm_client_dev_suspend(adev_to_drm(tmp_adev));
/* disable ras on ALL IPs */
if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b7ebae289bea..094c508d3d44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
+ ttm_bo_fini(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bfa3199591b6..28fc81f3f229 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -12446,7 +12446,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
int j = state->num_private_objs-1;
dm_atomic_destroy_state(obj,
- state->private_objs[i].state);
+ state->private_objs[i].state_to_destroy);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
@@ -12457,7 +12457,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
state->private_objs[j];
state->private_objs[j].ptr = NULL;
- state->private_objs[j].state = NULL;
+ state->private_objs[j].state_to_destroy = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 2ad33559a33a..5a66948ffd24 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -111,6 +111,7 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc,
static int
komeda_crtc_prepare(struct komeda_crtc *kcrtc)
{
+ struct drm_device *drm = kcrtc->base.dev;
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state);
@@ -128,8 +129,8 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
err = mdev->funcs->change_opmode(mdev, new_mode);
if (err) {
- DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
- mdev->dpmode, new_mode);
+ drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
goto unlock;
}
@@ -142,18 +143,18 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
if (new_mode != KOMEDA_MODE_DUAL_DISP) {
err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st));
if (err)
- DRM_ERROR("failed to set aclk.\n");
+ drm_err(drm, "failed to set aclk.\n");
err = clk_prepare_enable(mdev->aclk);
if (err)
- DRM_ERROR("failed to enable aclk.\n");
+ drm_err(drm, "failed to enable aclk.\n");
}
err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000);
if (err)
- DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
+ drm_err(drm, "failed to set pxlclk for pipe%d\n", master->id);
err = clk_prepare_enable(master->pxlclk);
if (err)
- DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id);
+ drm_err(drm, "failed to enable pxl clk for pipe%d.\n", master->id);
unlock:
mutex_unlock(&mdev->lock);
@@ -164,6 +165,7 @@ unlock:
static int
komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
{
+ struct drm_device *drm = kcrtc->base.dev;
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
u32 new_mode;
@@ -180,8 +182,8 @@ komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
err = mdev->funcs->change_opmode(mdev, new_mode);
if (err) {
- DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
- mdev->dpmode, new_mode);
+ drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
goto unlock;
}
@@ -200,6 +202,7 @@ unlock:
void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
struct komeda_events *evts)
{
+ struct drm_device *drm = kcrtc->base.dev;
struct drm_crtc *crtc = &kcrtc->base;
u32 events = evts->pipes[kcrtc->master->id];
@@ -212,7 +215,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
if (wb_conn)
drm_writeback_signal_completion(&wb_conn->base, 0);
else
- DRM_WARN("CRTC[%d]: EOW happen but no wb_connector.\n",
+ drm_warn(drm, "CRTC[%d]: EOW happen but no wb_connector.\n",
drm_crtc_index(&kcrtc->base));
}
/* will handle it together with the write back support */
@@ -236,7 +239,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
crtc->state->event = NULL;
drm_crtc_send_vblank_event(crtc, event);
} else {
- DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n",
+ drm_warn(drm, "CRTC[%d]: FLIP happened but no pending commit.\n",
drm_crtc_index(&kcrtc->base));
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@@ -309,7 +312,7 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
/* wait the flip take affect.*/
if (wait_for_completion_timeout(flip_done, HZ) == 0) {
- DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id);
+ drm_err(drm, "wait pipe%d flip done timeout\n", kcrtc->master->id);
if (!input_flip_done) {
unsigned long flags;
@@ -562,6 +565,7 @@ static const struct drm_crtc_funcs komeda_crtc_funcs = {
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
+ struct drm_device *drm = &kms->base;
struct komeda_crtc *crtc;
struct komeda_pipeline *master;
char str[16];
@@ -581,7 +585,7 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
else
sprintf(str, "None");
- DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n",
+ drm_info(drm, "CRTC-%d: master(pipe-%d) slave(%s).\n",
kms->n_crtcs, master->id, str);
kms->n_crtcs++;
@@ -613,6 +617,7 @@ static int komeda_attach_bridge(struct device *dev,
struct komeda_pipeline *pipe,
struct drm_encoder *encoder)
{
+ struct drm_device *drm = encoder->dev;
struct drm_bridge *bridge;
int err;
@@ -624,7 +629,7 @@ static int komeda_attach_bridge(struct device *dev,
err = drm_bridge_attach(encoder, bridge, NULL, 0);
if (err)
- dev_err(dev, "bridge_attach() failed for pipe: %s\n",
+ drm_err(drm, "bridge_attach() failed for pipe: %s\n",
of_node_full_name(pipe->of_node));
return err;
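The drm_* logging helpers take the drm_device so every message is prefixed with the device name, which is why each function above now looks up drm before logging. The conversion pattern in isolation, as a hedged sketch:

#include <drm/drm_print.h>

static void example_log_pxlclk(struct drm_device *drm, int pipe_id, int err)
{
	/* Previously: DRM_ERROR("failed to set pxlclk for pipe%d\n", pipe_id); */
	if (err)
		drm_err(drm, "failed to set pxlclk for pipe%d\n", pipe_id);
	else
		drm_info(drm, "pipe%d pixel clock configured\n", pipe_id);
}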
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 87f2e5ee8790..f1a5014bcfa1 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -263,7 +263,7 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ drm_atomic_get_new_crtc_state(state->state, state->crtc);
struct malidp_crtc_state *mc;
u32 src_w, src_h;
int ret;
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
index cc47c032dbc1..dae81ebafdb4 100644
--- a/drivers/gpu/drm/armada/armada_plane.c
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -94,12 +94,7 @@ int armada_drm_plane_atomic_check(struct drm_plane *plane,
return 0;
}
- if (state)
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
- else
- crtc_state = crtc->state;
-
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
0,
INT_MAX, true, false);
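drm_atomic_get_new_crtc_state() returns the CRTC state already tracked in the in-flight atomic commit (or NULL if the CRTC is not part of it), so the plane hooks above no longer need the existing-state / current-state fallback. A hedged atomic_check sketch mirroring the armada call:

#include <linux/limits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static int example_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_plane_state->crtc)
		return 0;	/* plane is being disabled */

	crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	return drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						   0, INT_MAX, true, false);
}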
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 2547613155da..cdbcba3b43ad 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -6,7 +6,9 @@
ast-y := \
ast_2000.o \
ast_2100.o \
+ ast_2200.o \
ast_2300.o \
+ ast_2400.o \
ast_2500.o \
ast_2600.o \
ast_cursor.o \
@@ -14,7 +16,6 @@ ast-y := \
ast_dp501.o \
ast_dp.o \
ast_drv.o \
- ast_main.o \
ast_mm.o \
ast_mode.o \
ast_post.o \
diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c
index 41c2aa1e425a..fa3bc23ce098 100644
--- a/drivers/gpu/drm/ast/ast_2000.c
+++ b/drivers/gpu/drm/ast/ast_2000.c
@@ -27,6 +27,9 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
#include "ast_drv.h"
#include "ast_post.h"
@@ -147,3 +150,108 @@ int ast_2000_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Mode setting
+ */
+
+const struct ast_vbios_dclk_info ast_2000_dclk_table[] = {
+ {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xee, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xc6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7b, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */
+ {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */
+ {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0d: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */
+ {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+ {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
+ {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
+ {0x77, 0x58, 0x80}, /* 17: VCLK119 */
+ {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
+ {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
+ {0x3b, 0x2c, 0x81}, /* 1a: VCLK118_25 */
+};
+
+/*
+ * Device initialization
+ */
+
+void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post)
+{
+ enum ast_tx_chip tx_chip = AST_TX_NONE;
+ u8 vgacra3;
+
+ /*
+ * VGACRA3 Enhanced Color Mode Register, check if DVO is already
+ * enabled, in that case, assume we have a SIL164 TMDS transmitter
+ *
+ * Don't make that assumption if we the chip wasn't enabled and
+ * is at power-on reset, otherwise we'll incorrectly "detect" a
+ * SIL164 when there is none.
+ */
+ if (!need_post) {
+ vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
+ if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED)
+ tx_chip = AST_TX_SIL164;
+ }
+
+ __ast_device_set_tx_chip(ast, tx_chip);
+}
+
+static const struct ast_device_quirks ast_2000_device_quirks = {
+ .crtc_mem_req_threshold_low = 31,
+ .crtc_mem_req_threshold_high = 47,
+};
+
+struct drm_device *ast_2000_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2000_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2000_detect_tx_chip(ast, need_post);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c
index 829e3b8b0d19..05aeb0624d41 100644
--- a/drivers/gpu/drm/ast/ast_2100.c
+++ b/drivers/gpu/drm/ast/ast_2100.c
@@ -27,6 +27,9 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
#include "ast_drv.h"
#include "ast_post.h"
@@ -386,3 +389,92 @@ int ast_2100_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Widescreen detection
+ */
+
+/* Try to detect WSXGA+ on Gen2+ */
+bool __ast_2100_detect_wsxga_p(struct ast_device *ast)
+{
+ u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0);
+
+ if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC))
+ return true;
+ if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN)
+ return true;
+
+ return false;
+}
+
+/* Try to detect WUXGA on Gen2+ */
+bool __ast_2100_detect_wuxga(struct ast_device *ast)
+{
+ u8 vgacrd1;
+
+ if (ast->support_fullhd) {
+ vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
+ if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA))
+ return true;
+ }
+
+ return false;
+}
+
+static void ast_2100_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast)) {
+ ast->support_wsxga_p = true;
+ if (ast->chip == AST2100)
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2100_device_quirks = {
+ .crtc_mem_req_threshold_low = 47,
+ .crtc_mem_req_threshold_high = 63,
+};
+
+struct drm_device *ast_2100_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2100_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2000_detect_tx_chip(ast, need_post);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_2100_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2200.c b/drivers/gpu/drm/ast/ast_2200.c
new file mode 100644
index 000000000000..b64345d11ffa
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2200.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "ast_drv.h"
+
+static void ast_2200_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast)) {
+ ast->support_wsxga_p = true;
+ if (ast->chip == AST2200)
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2200_device_quirks = {
+ .crtc_mem_req_threshold_low = 47,
+ .crtc_mem_req_threshold_high = 63,
+};
+
+struct drm_device *ast_2200_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2200_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2000_detect_tx_chip(ast, need_post);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_2200_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
+
diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c
index dc2a32244689..5f50d9f91ffd 100644
--- a/drivers/gpu/drm/ast/ast_2300.c
+++ b/drivers/gpu/drm/ast/ast_2300.c
@@ -27,6 +27,12 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "ast_drv.h"
#include "ast_post.h"
@@ -1326,3 +1332,132 @@ int ast_2300_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Device initialization
+ */
+
+void ast_2300_detect_tx_chip(struct ast_device *ast)
+{
+ enum ast_tx_chip tx_chip = AST_TX_NONE;
+ struct drm_device *dev = &ast->base;
+ u8 vgacrd1;
+
+ /*
+ * On AST GEN4+, look at the configuration set by the SoC in
+ * the SOC scratch register #1 bits 11:8 (interestingly marked
+ * as "reserved" in the spec)
+ */
+ vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
+ AST_IO_VGACRD1_TX_TYPE_MASK);
+ switch (vgacrd1) {
+ /*
+ * GEN4 to GEN6
+ */
+ case AST_IO_VGACRD1_TX_SIL164_VBIOS:
+ tx_chip = AST_TX_SIL164;
+ break;
+ case AST_IO_VGACRD1_TX_DP501_VBIOS:
+ ast->dp501_fw_addr = drmm_kzalloc(dev, SZ_32K, GFP_KERNEL);
+ if (ast->dp501_fw_addr) {
+ /* backup firmware */
+ if (ast_backup_fw(ast, ast->dp501_fw_addr, SZ_32K)) {
+ drmm_kfree(dev, ast->dp501_fw_addr);
+ ast->dp501_fw_addr = NULL;
+ }
+ }
+ fallthrough;
+ case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
+ tx_chip = AST_TX_DP501;
+ break;
+ /*
+ * GEN7+
+ */
+ case AST_IO_VGACRD1_TX_ASTDP:
+ tx_chip = AST_TX_ASTDP;
+ break;
+ /*
+ * Several of the listed TX chips are not explicitly supported
+ * by the ast driver. If these exist in real-world devices, they
+ * are most likely reported as VGA or SIL164 outputs. We warn here
+ * to get bug reports for these devices. If none come in for some
+ * time, we can begin to fail device probing on these values.
+ */
+ case AST_IO_VGACRD1_TX_ITE66121_VBIOS:
+ drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_CH7003_VBIOS:
+ drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_ANX9807_VBIOS:
+ drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ break;
+ }
+
+ __ast_device_set_tx_chip(ast, tx_chip);
+}
+
+static void ast_2300_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1300) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2300_device_quirks = {
+ .crtc_mem_req_threshold_low = 96,
+ .crtc_mem_req_threshold_high = 120,
+};
+
+struct drm_device *ast_2300_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2300_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
+ ast_2300_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2400.c b/drivers/gpu/drm/ast/ast_2400.c
new file mode 100644
index 000000000000..2e6befd24f91
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2400.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+
+static void ast_2400_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1400) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2400_device_quirks = {
+ .crtc_mem_req_threshold_low = 96,
+ .crtc_mem_req_threshold_high = 120,
+};
+
+struct drm_device *ast_2400_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2400_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
+ ast_2400_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c
index 1e541498ea67..2a52af0ded56 100644
--- a/drivers/gpu/drm/ast/ast_2500.c
+++ b/drivers/gpu/drm/ast/ast_2500.c
@@ -27,7 +27,9 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
+#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "ast_drv.h"
@@ -567,3 +569,107 @@ int ast_2500_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Mode setting
+ */
+
+const struct ast_vbios_dclk_info ast_2500_dclk_table[] = {
+ {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xee, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xc6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7b, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */
+ {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */
+ {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0d: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */
+ {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+ {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
+ {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
+ {0x58, 0x01, 0x42}, /* 17: VCLK119 */
+ {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
+ {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
+ {0x44, 0x20, 0x43}, /* 1a: VCLK118_25 */
+};
+
+/*
+ * Device initialization
+ */
+
+static void ast_2500_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST2510) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2500_device_quirks = {
+ .crtc_mem_req_threshold_low = 96,
+ .crtc_mem_req_threshold_high = 120,
+ .crtc_hsync_precatch_needed = true,
+};
+
+struct drm_device *ast_2500_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2500_device_quirks);
+
+ ast->dclk_table = ast_2500_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
+ ast_2500_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c
index 8d75a47444f5..dee78fd5b022 100644
--- a/drivers/gpu/drm/ast/ast_2600.c
+++ b/drivers/gpu/drm/ast/ast_2600.c
@@ -26,6 +26,10 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
#include "ast_drv.h"
#include "ast_post.h"
@@ -42,3 +46,71 @@ int ast_2600_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Device initialization
+ */
+
+static void ast_2600_detect_widescreen(struct ast_device *ast)
+{
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2600_device_quirks = {
+ .crtc_mem_req_threshold_low = 160,
+ .crtc_mem_req_threshold_high = 224,
+ .crtc_hsync_precatch_needed = true,
+ .crtc_hsync_add4_needed = true,
+};
+
+struct drm_device *ast_2600_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2600_device_quirks);
+
+ ast->dclk_table = ast_2500_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ switch (ast->tx_chip) {
+ case AST_TX_ASTDP:
+ ret = ast_post_gpu(ast);
+ break;
+ default:
+ ret = 0;
+ if (need_post)
+ ret = ast_post_gpu(ast);
+ break;
+ }
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_2600_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 473faa92d08c..b9a9b050b546 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -37,6 +37,7 @@
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
@@ -46,6 +47,34 @@ static int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);
+void ast_device_init(struct ast_device *ast,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ const struct ast_device_quirks *quirks)
+{
+ ast->quirks = quirks;
+ ast->chip = chip;
+ ast->config_mode = config_mode;
+ ast->regs = regs;
+ ast->ioregs = ioregs;
+}
+
+void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip)
+{
+ static const char * const info_str[] = {
+ "analog VGA",
+ "Sil164 TMDS transmitter",
+ "DP501 DisplayPort transmitter",
+ "ASPEED DisplayPort transmitter",
+ };
+
+ drm_info(&ast->base, "Using %s\n", info_str[tx_chip]);
+
+ ast->tx_chip = tx_chip;
+}
+
/*
* DRM driver
*/
@@ -266,7 +295,7 @@ static int ast_detect_chip(struct pci_dev *pdev,
*chip_out = chip;
*config_mode_out = config_mode;
- return 0;
+ return __AST_CHIP_GEN(chip);
}
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -277,6 +306,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *ioregs;
enum ast_config_mode config_mode;
enum ast_chip chip;
+ unsigned int chip_gen;
struct drm_device *drm;
bool need_post = false;
@@ -349,10 +379,43 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode);
- if (ret)
+ if (ret < 0)
return ret;
+ chip_gen = ret;
- drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post);
+ switch (chip_gen) {
+ case 1:
+ drm = ast_2000_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 2:
+ drm = ast_2100_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 3:
+ drm = ast_2200_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 4:
+ drm = ast_2300_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 5:
+ drm = ast_2400_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 6:
+ drm = ast_2500_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 7:
+ drm = ast_2600_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ default:
+ dev_err(&pdev->dev, "Gen%d not supported\n", chip_gen);
+ return -ENODEV;
+ }
if (IS_ERR(drm))
return PTR_ERR(drm);
pci_set_drvdata(pdev, drm);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index c15aef014f69..7be36a358e74 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -164,9 +164,31 @@ to_ast_connector(struct drm_connector *connector)
* Device
*/
+struct ast_device_quirks {
+ /*
+ * CRTC memory request threshold
+ */
+ unsigned char crtc_mem_req_threshold_low;
+ unsigned char crtc_mem_req_threshold_high;
+
+ /*
+ * Adjust hsync values to load next scanline early. Signalled
+ * by AST2500PreCatchCRT in VBIOS mode flags.
+ */
+ bool crtc_hsync_precatch_needed;
+
+ /*
+ * Workaround for modes with HSync Time that is not a multiple
+ * of 8 (e.g., 1920x1080@60Hz, HSync +44 pixels).
+ */
+ bool crtc_hsync_add4_needed;
+};
+
struct ast_device {
struct drm_device base;
+ const struct ast_device_quirks *quirks;
+
void __iomem *regs;
void __iomem *ioregs;
void __iomem *dp501_fw_buf;
@@ -174,6 +196,8 @@ struct ast_device {
enum ast_config_mode config_mode;
enum ast_chip chip;
+ const struct ast_vbios_dclk_info *dclk_table;
+
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
@@ -217,14 +241,6 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev)
return container_of(dev, struct ast_device, base);
}
-struct drm_device *ast_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum ast_chip chip,
- enum ast_config_mode config_mode,
- void __iomem *regs,
- void __iomem *ioregs,
- bool need_post);
-
static inline unsigned long __ast_gen(struct ast_device *ast)
{
return __AST_CHIP_GEN(ast->chip);
@@ -415,21 +431,89 @@ struct ast_crtc_state {
int ast_mm_init(struct ast_device *ast);
+/* ast_drv.c */
+void ast_device_init(struct ast_device *ast,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ const struct ast_device_quirks *quirks);
+void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip);
+
/* ast_2000.c */
int ast_2000_post(struct ast_device *ast);
+extern const struct ast_vbios_dclk_info ast_2000_dclk_table[];
+void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post);
+struct drm_device *ast_2000_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast_2100.c */
int ast_2100_post(struct ast_device *ast);
+bool __ast_2100_detect_wsxga_p(struct ast_device *ast);
+bool __ast_2100_detect_wuxga(struct ast_device *ast);
+struct drm_device *ast_2100_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2200.c */
+struct drm_device *ast_2200_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast_2300.c */
int ast_2300_post(struct ast_device *ast);
+void ast_2300_detect_tx_chip(struct ast_device *ast);
+struct drm_device *ast_2300_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2400.c */
+struct drm_device *ast_2400_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast_2500.c */
void ast_2500_patch_ahb(void __iomem *regs);
int ast_2500_post(struct ast_device *ast);
+extern const struct ast_vbios_dclk_info ast_2500_dclk_table[];
+struct drm_device *ast_2500_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast_2600.c */
int ast_2600_post(struct ast_device *ast);
+struct drm_device *ast_2600_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast post */
int ast_post_gpu(struct ast_device *ast);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
deleted file mode 100644
index 3eea6a6cdacd..000000000000
--- a/drivers/gpu/drm/ast/ast_main.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors: Dave Airlie <airlied@redhat.com>
- */
-
-#include <linux/of.h>
-#include <linux/pci.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_managed.h>
-
-#include "ast_drv.h"
-
-/* Try to detect WSXGA+ on Gen2+ */
-static bool __ast_2100_detect_wsxga_p(struct ast_device *ast)
-{
- u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0);
-
- if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC))
- return true;
- if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN)
- return true;
-
- return false;
-}
-
-/* Try to detect WUXGA on Gen2+ */
-static bool __ast_2100_detect_wuxga(struct ast_device *ast)
-{
- u8 vgacrd1;
-
- if (ast->support_fullhd) {
- vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
- if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA))
- return true;
- }
-
- return false;
-}
-
-static void ast_detect_widescreen(struct ast_device *ast)
-{
- ast->support_wsxga_p = false;
- ast->support_fullhd = false;
- ast->support_wuxga = false;
-
- if (AST_GEN(ast) >= 7) {
- ast->support_wsxga_p = true;
- ast->support_fullhd = true;
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 6) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- else if (ast->chip == AST2510)
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p)
- ast->support_fullhd = true;
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 5) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- else if (ast->chip == AST1400)
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p)
- ast->support_fullhd = true;
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 4) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- else if (ast->chip == AST1300)
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p)
- ast->support_fullhd = true;
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 3) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p) {
- if (ast->chip == AST2200)
- ast->support_fullhd = true;
- }
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 2) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p) {
- if (ast->chip == AST2100)
- ast->support_fullhd = true;
- }
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- }
-}
-
-static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
-{
- static const char * const info_str[] = {
- "analog VGA",
- "Sil164 TMDS transmitter",
- "DP501 DisplayPort transmitter",
- "ASPEED DisplayPort transmitter",
- };
-
- struct drm_device *dev = &ast->base;
- u8 vgacra3, vgacrd1;
-
- /* Check 3rd Tx option (digital output afaik) */
- ast->tx_chip = AST_TX_NONE;
-
- if (AST_GEN(ast) <= 3) {
- /*
- * VGACRA3 Enhanced Color Mode Register, check if DVO is already
- * enabled, in that case, assume we have a SIL164 TMDS transmitter
- *
- * Don't make that assumption if we the chip wasn't enabled and
- * is at power-on reset, otherwise we'll incorrectly "detect" a
- * SIL164 when there is none.
- */
- if (!need_post) {
- vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
- if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED)
- ast->tx_chip = AST_TX_SIL164;
- }
- } else {
- /*
- * On AST GEN4+, look at the configuration set by the SoC in
- * the SOC scratch register #1 bits 11:8 (interestingly marked
- * as "reserved" in the spec)
- */
- vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
- AST_IO_VGACRD1_TX_TYPE_MASK);
- switch (vgacrd1) {
- /*
- * GEN4 to GEN6
- */
- case AST_IO_VGACRD1_TX_SIL164_VBIOS:
- ast->tx_chip = AST_TX_SIL164;
- break;
- case AST_IO_VGACRD1_TX_DP501_VBIOS:
- ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);
- if (ast->dp501_fw_addr) {
- /* backup firmware */
- if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) {
- drmm_kfree(dev, ast->dp501_fw_addr);
- ast->dp501_fw_addr = NULL;
- }
- }
- fallthrough;
- case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
- ast->tx_chip = AST_TX_DP501;
- break;
- /*
- * GEN7+
- */
- case AST_IO_VGACRD1_TX_ASTDP:
- ast->tx_chip = AST_TX_ASTDP;
- break;
- /*
- * Several of the listed TX chips are not explicitly supported
- * by the ast driver. If these exist in real-world devices, they
- * are most likely reported as VGA or SIL164 outputs. We warn here
- * to get bug reports for these devices. If none come in for some
- * time, we can begin to fail device probing on these values.
- */
- case AST_IO_VGACRD1_TX_ITE66121_VBIOS:
- drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n",
- vgacrd1, AST_GEN(ast));
- break;
- case AST_IO_VGACRD1_TX_CH7003_VBIOS:
- drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n",
- vgacrd1, AST_GEN(ast));
- break;
- case AST_IO_VGACRD1_TX_ANX9807_VBIOS:
- drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n",
- vgacrd1, AST_GEN(ast));
- break;
- }
- }
-
- drm_info(dev, "Using %s\n", info_str[ast->tx_chip]);
-}
-
-struct drm_device *ast_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum ast_chip chip,
- enum ast_config_mode config_mode,
- void __iomem *regs,
- void __iomem *ioregs,
- bool need_post)
-{
- struct drm_device *dev;
- struct ast_device *ast;
- int ret;
-
- ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
- if (IS_ERR(ast))
- return ERR_CAST(ast);
- dev = &ast->base;
-
- ast->chip = chip;
- ast->config_mode = config_mode;
- ast->regs = regs;
- ast->ioregs = ioregs;
-
- ast_detect_tx_chip(ast, need_post);
- switch (ast->tx_chip) {
- case AST_TX_ASTDP:
- ret = ast_post_gpu(ast);
- break;
- default:
- ret = 0;
- if (need_post)
- ret = ast_post_gpu(ast);
- break;
- }
- if (ret)
- return ERR_PTR(ret);
-
- ret = ast_mm_init(ast);
- if (ret)
- return ERR_PTR(ret);
-
- /* map reserved buffer */
- ast->dp501_fw_buf = NULL;
- if (ast->vram_size < pci_resource_len(pdev, 0)) {
- ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
- if (!ast->dp501_fw_buf)
- drm_info(dev, "failed to map reserved buffer!\n");
- }
-
- ast_detect_widescreen(ast);
-
- ret = ast_mode_config_init(ast);
- if (ret)
- return ERR_PTR(ret);
-
- return dev;
-}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 30b011ed0a05..9ce874dba69c 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -241,16 +241,15 @@ static void ast_set_std_reg(struct ast_device *ast,
ast_set_index_reg(ast, AST_IO_VGAGRI, i, stdtable->gr[i]);
}
-static void ast_set_crtc_reg(struct ast_device *ast,
- struct drm_display_mode *mode,
+static void ast_set_crtc_reg(struct ast_device *ast, struct drm_display_mode *mode,
const struct ast_vbios_enhtable *vmode)
{
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
- u16 temp, precache = 0;
+ u16 temp;
+ unsigned char crtc_hsync_precatch = 0;
- if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) &&
- (vmode->flags & AST2500PreCatchCRT))
- precache = 40;
+ if (ast->quirks->crtc_hsync_precatch_needed && (vmode->flags & AST2500PreCatchCRT))
+ crtc_hsync_precatch = 40;
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00);
@@ -276,12 +275,12 @@ static void ast_set_crtc_reg(struct ast_device *ast,
jregAD |= 0x01; /* HBE D[5] */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x03, 0xE0, (temp & 0x1f));
- temp = ((mode->crtc_hsync_start-precache) >> 3) - 1;
+ temp = ((mode->crtc_hsync_start - crtc_hsync_precatch) >> 3) - 1;
if (temp & 0x100)
jregAC |= 0x40; /* HRS D[5] */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x04, 0x00, temp);
- temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f;
+ temp = (((mode->crtc_hsync_end - crtc_hsync_precatch) >> 3) - 1) & 0x3f;
if (temp & 0x20)
jregAD |= 0x04; /* HRE D[5] */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
@@ -289,8 +288,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAC, 0x00, jregAC);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAD, 0x00, jregAD);
- // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels);
- if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080))
+ if (ast->quirks->crtc_hsync_add4_needed && mode->crtc_vdisplay == 1080)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x02);
else
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x00);
@@ -348,7 +346,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x09, 0xdf, jreg09);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAE, 0x00, (jregAE | 0x80));
- if (precache)
+ if (crtc_hsync_precatch)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x80);
else
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x00);
@@ -370,12 +368,7 @@ static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
const struct ast_vbios_enhtable *vmode)
{
- const struct ast_vbios_dclk_info *clk_info;
-
- if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast))
- clk_info = &dclk_table_ast2500[vmode->dclk_index];
- else
- clk_info = &dclk_table[vmode->dclk_index];
+ const struct ast_vbios_dclk_info *clk_info = &ast->dclk_table[vmode->dclk_index];
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2);
@@ -415,20 +408,11 @@ static void ast_set_color_reg(struct ast_device *ast,
static void ast_set_crtthd_reg(struct ast_device *ast)
{
- /* Set Threshold */
- if (IS_AST_GEN7(ast)) {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0xe0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0xa0);
- } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x78);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x60);
- } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x3f);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x2f);
- } else {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x2f);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x1f);
- }
+ u8 vgacra6 = ast->quirks->crtc_mem_req_threshold_low;
+ u8 vgacra7 = ast->quirks->crtc_mem_req_threshold_high;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, vgacra7);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, vgacra6);
}
static void ast_set_sync_reg(struct ast_device *ast,
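The generation checks that ast_set_crtc_reg(), ast_set_dclk_reg() and ast_set_crtthd_reg() used to perform are now driven by struct ast_device_quirks and the per-device dclk table. As an illustration only (not part of the patch), a hypothetical future generation would describe the same CRTC behaviour declaratively; the field names come from ast_drv.h above, and the values simply mirror the Gen7 entry:

	static const struct ast_device_quirks ast_example_device_quirks = {
		.crtc_mem_req_threshold_low  = 160,	/* written to VGACRA6 */
		.crtc_mem_req_threshold_high = 224,	/* written to VGACRA7 */
		.crtc_hsync_precatch_needed  = true,	/* honour AST2500PreCatchCRT modes */
		.crtc_hsync_add4_needed      = true,	/* 1080p HSync-width workaround */
	};

A new caller only has to pass such a table to ast_device_init(); the mode-setting code never needs another IS_AST_GENx branch.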
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index f1c9f7e1f1fc..7da5b5c60f41 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -33,66 +33,6 @@
#define HiCModeIndex 3
#define TrueCModeIndex 4
-static const struct ast_vbios_dclk_info dclk_table[] = {
- {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
- {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
- {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
- {0x76, 0x63, 0x01}, /* 03: VCLK36 */
- {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
- {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
- {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
- {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
- {0x80, 0x64, 0x00}, /* 08: VCLK65 */
- {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
- {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
- {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
- {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
- {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
- {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
- {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
- {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
- {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
- {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
- {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
- {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
- {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
- {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
- {0x77, 0x58, 0x80}, /* 17: VCLK119 */
- {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
- {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
- {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */
-};
-
-static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
- {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
- {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
- {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
- {0x76, 0x63, 0x01}, /* 03: VCLK36 */
- {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
- {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
- {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
- {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
- {0x80, 0x64, 0x00}, /* 08: VCLK65 */
- {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
- {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
- {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
- {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
- {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
- {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
- {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
- {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
- {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
- {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
- {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
- {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
- {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
- {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
- {0x58, 0x01, 0x42}, /* 17: VCLK119 */
- {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
- {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
- {0x44, 0x20, 0x43}, /* 1A: VCLK118_25 */
-};
-
static const struct ast_vbios_stdtable vbios_stdtable[] = {
/* MD_2_3_400 */
{
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 0f7ffb3ced20..e0efc7309b1b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -215,32 +216,32 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_XLCDC_CM),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_XLCDC_SD);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_XLCDC_SD,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n");
}
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_DISP),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_SYNC),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_PIXEL_CLK),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n");
clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
pinctrl_pm_select_sleep_state(dev->dev);
@@ -269,32 +270,32 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_PIXEL_CLK,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_SYNC,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_DISP,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n");
if (crtc->dc->desc->is_xlcdc) {
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_CM);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_XLCDC_CM,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_SD);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_XLCDC_SD),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n");
}
pm_runtime_put_sync(dev->dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index fa8ad94e431a..acb017a2486b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -724,19 +724,19 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
ret = atmel_hlcdc_create_outputs(dev);
if (ret) {
- dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret);
+ drm_err(dev, "failed to create HLCDC outputs: %d\n", ret);
return ret;
}
ret = atmel_hlcdc_create_planes(dev);
if (ret) {
- dev_err(dev->dev, "failed to create planes: %d\n", ret);
+ drm_err(dev, "failed to create planes: %d\n", ret);
return ret;
}
ret = atmel_hlcdc_crtc_create(dev);
if (ret) {
- dev_err(dev->dev, "failed to create crtc\n");
+ drm_err(dev, "failed to create crtc\n");
return ret;
}
@@ -778,7 +778,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = clk_prepare_enable(dc->hlcdc->periph_clk);
if (ret) {
- dev_err(dev->dev, "failed to enable periph_clk\n");
+ drm_err(dev, "failed to enable periph_clk\n");
return ret;
}
@@ -786,13 +786,13 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = drm_vblank_init(dev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
+ drm_err(dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}
ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
+ drm_err(dev, "failed to initialize mode setting\n");
goto err_periph_clk_disable;
}
@@ -802,7 +802,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = atmel_hlcdc_dc_irq_install(dev, dc->hlcdc->irq);
pm_runtime_put_sync(dev->dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to install IRQ handler\n");
+ drm_err(dev, "failed to install IRQ handler\n");
goto err_periph_clk_disable;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index e1a0bb24b511..53d47f01db0b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -378,7 +378,8 @@ struct atmel_lcdc_dc_ops {
void (*lcdc_update_buffers)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state,
u32 sr, int i);
- void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane);
+ void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_dc *dc);
void (*lcdc_update_general_settings)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state);
void (*lcdc_atomic_update)(struct atmel_hlcdc_plane *plane,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 50fee6a93964..0b8a86afb096 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -15,6 +15,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "atmel_hlcdc_dc.h"
@@ -92,7 +93,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep);
of_node_put(ep);
if (output->bus_fmt < 0) {
- dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint);
+ drm_err(dev, "endpoint %d: invalid bus width\n", endpoint);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 4a7ba0918eca..38f60befd7d7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -365,13 +365,34 @@ void atmel_xlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane,
xfactor);
/*
- * With YCbCr 4:2:2 and YCbYcr 4:2:0 window resampling, configuration
- * register LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT is half
+ * With YCbCr 4:2:0 window resampling, configuration register
+ * LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT values are half
* the value of yfactor and xfactor.
+ *
+ * On the other hand, with YCbCr 4:2:2 window resampling, only the
+ * configuration register LCDC_HEOCFG27.HXSCFACT value is half the value
+ * of the xfactor; the value of LCDC_HEOCFG25.VXSCFACT is yfactor (no
+ * division by 2).
*/
- if (state->base.fb->format->format == DRM_FORMAT_YUV420) {
+ switch (state->base.fb->format->format) {
+ /* YCbCr 4:2:2 */
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_NV61:
+ xfactor /= 2;
+ break;
+
+ /* YCbCr 4:2:0 */
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_NV21:
yfactor /= 2;
xfactor /= 2;
+ break;
+ default:
+ break;
}
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 2,
@@ -714,7 +735,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (!hstate->base.crtc || WARN_ON(!fb))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, s->crtc);
mode = &crtc_state->adjusted_mode;
ret = drm_atomic_helper_check_plane_state(s, crtc_state,
@@ -816,7 +837,8 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
return 0;
}
-static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
+static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_dc *dc)
{
/* Disable interrupts */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR,
@@ -832,7 +854,8 @@ static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}
-static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
+static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_dc *dc)
{
/* Disable interrupts */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_IDR,
@@ -842,6 +865,15 @@ static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_XLCDC_LAYER_ENR, 0);
+ /*
+ * Updating XLCDC_xxxCFGx, XLCDC_xxxFBA and XLCDC_xxxEN (where xxx
+ * indicates each layer) requires writing one to the Update Attribute
+ * field for each layer in the LCDC_ATTRE register on SAM9X7.
+ */
+ regmap_write(dc->hlcdc->regmap, ATMEL_XLCDC_ATTRE, ATMEL_XLCDC_BASE_UPDATE |
+ ATMEL_XLCDC_OVR1_UPDATE | ATMEL_XLCDC_OVR3_UPDATE |
+ ATMEL_XLCDC_HEO_UPDATE);
+
/* Clear all pending interrupts */
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_XLCDC_LAYER_ISR);
}
@@ -852,7 +884,7 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private;
- dc->desc->ops->lcdc_atomic_disable(plane);
+ dc->desc->ops->lcdc_atomic_disable(plane, dc);
}
static void atmel_hlcdc_atomic_update(struct atmel_hlcdc_plane *plane,
@@ -1034,7 +1066,7 @@ static void atmel_hlcdc_irq_dbg(struct atmel_hlcdc_plane *plane,
if (isr &
(ATMEL_HLCDC_LAYER_OVR_IRQ(0) | ATMEL_HLCDC_LAYER_OVR_IRQ(1) |
ATMEL_HLCDC_LAYER_OVR_IRQ(2)))
- dev_dbg(plane->base.dev->dev, "overrun on plane %s\n",
+ drm_dbg(plane->base.dev, "overrun on plane %s\n",
desc->name);
}
@@ -1051,7 +1083,7 @@ static void atmel_xlcdc_irq_dbg(struct atmel_hlcdc_plane *plane,
if (isr &
(ATMEL_XLCDC_LAYER_OVR_IRQ(0) | ATMEL_XLCDC_LAYER_OVR_IRQ(1) |
ATMEL_XLCDC_LAYER_OVR_IRQ(2)))
- dev_dbg(plane->base.dev->dev, "overrun on plane %s\n",
+ drm_dbg(plane->base.dev, "overrun on plane %s\n",
desc->name);
}
@@ -1140,7 +1172,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
if (state) {
if (atmel_hlcdc_plane_alloc_dscrs(p, state)) {
kfree(state);
- dev_err(p->dev->dev,
+ drm_err(p->dev,
"Failed to allocate initial plane state\n");
return;
}
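The scaler fix above encodes the resampling rule spelled out in the comment: YCbCr 4:2:0 sources halve both window-resampling factors, while YCbCr 4:2:2 sources halve only the horizontal one. A minimal sketch of that rule as a standalone helper (illustration only; the helper name is hypothetical and the real driver applies the adjustment inline):

	/* Adjust the HXSCFACT/VXSCFACT inputs for chroma subsampling. */
	static void xlcdc_adjust_scaler_factors(u32 fourcc, u32 *xfactor, u32 *yfactor)
	{
		switch (fourcc) {
		case DRM_FORMAT_YUV420:		/* 4:2:0: halve both directions */
		case DRM_FORMAT_NV21:
			*yfactor /= 2;
			fallthrough;
		case DRM_FORMAT_YUYV:		/* 4:2:2: halve horizontally only */
		case DRM_FORMAT_UYVY:
		case DRM_FORMAT_YVYU:
		case DRM_FORMAT_VYUY:
		case DRM_FORMAT_YUV422:
		case DRM_FORMAT_NV61:
			*xfactor /= 2;
			break;
		default:			/* 4:4:4 and RGB formats: unchanged */
			break;
		}
	}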
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
index 9a480c6abb85..b9028a5e5a06 100644
--- a/drivers/gpu/drm/bridge/imx/Kconfig
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -18,12 +18,23 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE
depends on OF
depends on COMMON_CLK
select DRM_DW_HDMI
+ imply DRM_IMX8MP_HDMI_PAI
imply DRM_IMX8MP_HDMI_PVI
imply PHY_FSL_SAMSUNG_HDMI_PHY
help
Choose this to enable support for the internal HDMI encoder found
on the i.MX8MP SoC.
+config DRM_IMX8MP_HDMI_PAI
+ tristate "Freescale i.MX8MP HDMI PAI bridge support"
+ depends on OF
+ select DRM_DW_HDMI
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Choose this to enable support for the internal HDMI TX Parallel
+ Audio Interface found on the Freescale i.MX8MP SoC.
+
config DRM_IMX8MP_HDMI_PVI
tristate "Freescale i.MX8MP HDMI PVI bridge support"
depends on OF
diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile
index dd5d48584806..8d01fda25451 100644
--- a/drivers/gpu/drm/bridge/imx/Makefile
+++ b/drivers/gpu/drm/bridge/imx/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o
obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o
obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o
+obj-$(CONFIG_DRM_IMX8MP_HDMI_PAI) += imx8mp-hdmi-pai.o
obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o
obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o
obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c
new file mode 100644
index 000000000000..8d13a35b206a
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <drm/bridge/dw_hdmi.h>
+#include <sound/asoundef.h>
+
+#define HTX_PAI_CTRL 0x00
+#define ENABLE BIT(0)
+
+#define HTX_PAI_CTRL_EXT 0x04
+#define WTMK_HIGH_MASK GENMASK(31, 24)
+#define WTMK_LOW_MASK GENMASK(23, 16)
+#define NUM_CH_MASK GENMASK(10, 8)
+#define WTMK_HIGH(n) FIELD_PREP(WTMK_HIGH_MASK, (n))
+#define WTMK_LOW(n) FIELD_PREP(WTMK_LOW_MASK, (n))
+#define NUM_CH(n) FIELD_PREP(NUM_CH_MASK, (n) - 1)
+
+#define HTX_PAI_FIELD_CTRL 0x08
+#define PRE_SEL GENMASK(28, 24)
+#define D_SEL GENMASK(23, 20)
+#define V_SEL GENMASK(19, 15)
+#define U_SEL GENMASK(14, 10)
+#define C_SEL GENMASK(9, 5)
+#define P_SEL GENMASK(4, 0)
+
+struct imx8mp_hdmi_pai {
+ struct regmap *regmap;
+};
+
+static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel,
+ int width, int rate, int non_pcm,
+ int iec958)
+{
+ const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
+ struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;
+ int val;
+
+ /* PAI set control extended */
+ val = WTMK_HIGH(3) | WTMK_LOW(3);
+ val |= NUM_CH(channel);
+ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL_EXT, val);
+
+ /* IEC60958 format */
+ if (iec958) {
+ val = FIELD_PREP_CONST(P_SEL,
+ __bf_shf(IEC958_SUBFRAME_PARITY));
+ val |= FIELD_PREP_CONST(C_SEL,
+ __bf_shf(IEC958_SUBFRAME_CHANNEL_STATUS));
+ val |= FIELD_PREP_CONST(U_SEL,
+ __bf_shf(IEC958_SUBFRAME_USER_DATA));
+ val |= FIELD_PREP_CONST(V_SEL,
+ __bf_shf(IEC958_SUBFRAME_VALIDITY));
+ val |= FIELD_PREP_CONST(D_SEL,
+ __bf_shf(IEC958_SUBFRAME_SAMPLE_24_MASK));
+ val |= FIELD_PREP_CONST(PRE_SEL,
+ __bf_shf(IEC958_SUBFRAME_PREAMBLE_MASK));
+ } else {
+ /*
+ * The allowed PCM widths are 24 bit and 32 bit, as supported by the
+ * aud2htx module.
+ * For 24 bit, D_SEL = 0 selects all the bits.
+ * For 32 bit, D_SEL = 8 selects the 24 bits in the MSBs.
+ */
+ val = FIELD_PREP(D_SEL, width - 24);
+ }
+
+ regmap_write(hdmi_pai->regmap, HTX_PAI_FIELD_CTRL, val);
+
+ /* PAI start running */
+ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, ENABLE);
+}
+
+static void imx8mp_hdmi_pai_disable(struct dw_hdmi *dw_hdmi)
+{
+ const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
+ struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;
+
+ /* Stop PAI */
+ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0);
+}
+
+static const struct regmap_config imx8mp_hdmi_pai_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = HTX_PAI_FIELD_CTRL,
+};
+
+static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_hdmi_plat_data *plat_data = data;
+ struct imx8mp_hdmi_pai *hdmi_pai;
+ struct resource *res;
+ void __iomem *base;
+
+ hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL);
+ if (!hdmi_pai)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ hdmi_pai->regmap = devm_regmap_init_mmio_clk(dev, "apb", base,
+ &imx8mp_hdmi_pai_regmap_config);
+ if (IS_ERR(hdmi_pai->regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(hdmi_pai->regmap);
+ }
+
+ plat_data->enable_audio = imx8mp_hdmi_pai_enable;
+ plat_data->disable_audio = imx8mp_hdmi_pai_disable;
+ plat_data->priv_audio = hdmi_pai;
+
+ return 0;
+}
+
+static const struct component_ops imx8mp_hdmi_pai_ops = {
+ .bind = imx8mp_hdmi_pai_bind,
+};
+
+static int imx8mp_hdmi_pai_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &imx8mp_hdmi_pai_ops);
+}
+
+static void imx8mp_hdmi_pai_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx8mp_hdmi_pai_ops);
+}
+
+static const struct of_device_id imx8mp_hdmi_pai_of_table[] = {
+ { .compatible = "fsl,imx8mp-hdmi-pai" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pai_of_table);
+
+static struct platform_driver imx8mp_hdmi_pai_platform_driver = {
+ .probe = imx8mp_hdmi_pai_probe,
+ .remove = imx8mp_hdmi_pai_remove,
+ .driver = {
+ .name = "imx8mp-hdmi-pai",
+ .of_match_table = imx8mp_hdmi_pai_of_table,
+ },
+};
+module_platform_driver(imx8mp_hdmi_pai_platform_driver);
+
+MODULE_DESCRIPTION("i.MX8MP HDMI PAI driver");
+MODULE_LICENSE("GPL");
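In the PCM path of imx8mp_hdmi_pai_enable(), HTX_PAI_FIELD_CTRL.D_SEL is derived directly from the sample width, per the comment in the code. A tiny worked example, assuming the only widths that reach the PAI are the 24-bit and 32-bit formats the aud2htx front end delivers:

	/* Illustration only: D_SEL = width - 24 selects how many LSBs to skip. */
	static u32 pai_pcm_field_ctrl(int width)
	{
		return FIELD_PREP(D_SEL, width - 24);	/* 24 -> 0, 32 -> 8 (keep 24 MSBs) */
	}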
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
index 1e7a789ec289..32fd3554e267 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
@@ -5,11 +5,13 @@
*/
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_modes.h>
+#include <drm/drm_of.h>
struct imx8mp_hdmi {
struct dw_hdmi_plat_data plat_data;
@@ -79,10 +81,45 @@ static const struct dw_hdmi_phy_ops imx8mp_hdmi_phy_ops = {
.update_hpd = dw_hdmi_phy_update_hpd,
};
+static int imx8mp_dw_hdmi_bind(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = component_bind_all(dev, &hdmi->plat_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "component_bind_all failed!\n");
+
+ hdmi->dw_hdmi = dw_hdmi_probe(pdev, &hdmi->plat_data);
+ if (IS_ERR(hdmi->dw_hdmi)) {
+ component_unbind_all(dev, &hdmi->plat_data);
+ return PTR_ERR(hdmi->dw_hdmi);
+ }
+
+ return 0;
+}
+
+static void imx8mp_dw_hdmi_unbind(struct device *dev)
+{
+ struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_remove(hdmi->dw_hdmi);
+
+ component_unbind_all(dev, &hdmi->plat_data);
+}
+
+static const struct component_master_ops imx8mp_dw_hdmi_ops = {
+ .bind = imx8mp_dw_hdmi_bind,
+ .unbind = imx8mp_dw_hdmi_unbind,
+};
+
static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_hdmi_plat_data *plat_data;
+ struct component_match *match = NULL;
+ struct device_node *remote;
struct imx8mp_hdmi *hdmi;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
@@ -102,20 +139,38 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
plat_data->priv_data = hdmi;
plat_data->phy_force_vendor = true;
- hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
- if (IS_ERR(hdmi->dw_hdmi))
- return PTR_ERR(hdmi->dw_hdmi);
-
platform_set_drvdata(pdev, hdmi);
+ /* port@2 is for hdmi_pai device */
+ remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0);
+ if (!remote) {
+ hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
+ if (IS_ERR(hdmi->dw_hdmi))
+ return PTR_ERR(hdmi->dw_hdmi);
+ } else {
+ drm_of_component_match_add(dev, &match, component_compare_of, remote);
+
+ of_node_put(remote);
+
+ return component_master_add_with_match(dev, &imx8mp_dw_hdmi_ops, match);
+ }
+
return 0;
}
static void imx8mp_dw_hdmi_remove(struct platform_device *pdev)
{
struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev);
+ struct device_node *remote;
- dw_hdmi_remove(hdmi->dw_hdmi);
+ remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0);
+ if (remote) {
+ of_node_put(remote);
+
+ component_master_del(&pdev->dev, &imx8mp_dw_hdmi_ops);
+ } else {
+ dw_hdmi_remove(hdmi->dw_hdmi);
+ }
}
static int imx8mp_dw_hdmi_pm_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 5d272916e200..122502968927 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -683,11 +683,6 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int imx8qxp_ldb_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
static int imx8qxp_ldb_runtime_resume(struct device *dev)
{
struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev);
@@ -700,7 +695,7 @@ static int imx8qxp_ldb_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx8qxp_ldb_pm_ops = {
- RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL)
+ RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL)
};
static const struct of_device_id imx8qxp_ldb_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 2c5e532410de..a46df7583bcf 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -61,6 +61,14 @@ config DRM_DW_HDMI_QP
select DRM_KMS_HELPER
select REGMAP_MMIO
+config DRM_DW_HDMI_QP_CEC
+ bool "Synopsis Designware QP CEC interface"
+ depends on DRM_DW_HDMI_QP
+ select DRM_DISPLAY_HDMI_CEC_HELPER
+ help
+ Support the CEC interface that is part of the Synopsys
+ DesignWare HDMI QP block.
+
config DRM_DW_MIPI_DSI
tristate
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
index ab18f9a3bf23..df7a37eb47f4 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
@@ -90,6 +90,11 @@ static int audio_hw_params(struct device *dev, void *data,
params->iec.status[0] & IEC958_AES0_NONAUDIO);
dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width);
+ if (daifmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
+ dw_hdmi_set_sample_iec958(dw->data.hdmi, 1);
+ else
+ dw_hdmi_set_sample_iec958(dw->data.hdmi, 0);
+
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 39332c57f2c5..4ba7b339eff6 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -18,6 +18,7 @@
#include <drm/bridge/dw_hdmi_qp.h>
#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -26,6 +27,8 @@
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
+#include <media/cec.h>
+
#include <sound/hdmi-codec.h>
#include "dw-hdmi-qp.h"
@@ -131,17 +134,34 @@ struct dw_hdmi_qp_i2c {
bool is_segment;
};
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+struct dw_hdmi_qp_cec {
+ struct drm_connector *connector;
+ int irq;
+ u32 addresses;
+ struct cec_msg rx_msg;
+ u8 tx_status;
+ bool tx_done;
+ bool rx_done;
+};
+#endif
+
struct dw_hdmi_qp {
struct drm_bridge bridge;
struct device *dev;
struct dw_hdmi_qp_i2c *i2c;
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+ struct dw_hdmi_qp_cec *cec;
+#endif
+
struct {
const struct dw_hdmi_qp_phy_ops *ops;
void *data;
} phy;
+ unsigned long ref_clk_rate;
struct regmap *regm;
unsigned long tmds_char_rate;
@@ -965,6 +985,179 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge,
}
}
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+static irqreturn_t dw_hdmi_qp_cec_hardirq(int irq, void *dev_id)
+{
+ struct dw_hdmi_qp *hdmi = dev_id;
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 stat;
+
+ stat = dw_hdmi_qp_read(hdmi, CEC_INT_STATUS);
+ if (stat == 0)
+ return IRQ_NONE;
+
+ dw_hdmi_qp_write(hdmi, stat, CEC_INT_CLEAR);
+
+ if (stat & CEC_STAT_LINE_ERR) {
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_DONE) {
+ cec->tx_status = CEC_TX_STATUS_OK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_NACK) {
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (stat & CEC_STAT_EOM) {
+ unsigned int len, i, val;
+
+ val = dw_hdmi_qp_read(hdmi, CEC_RX_COUNT_STATUS);
+ len = (val & 0xf) + 1;
+
+ if (len > sizeof(cec->rx_msg.msg))
+ len = sizeof(cec->rx_msg.msg);
+
+ for (i = 0; i < 4; i++) {
+ val = dw_hdmi_qp_read(hdmi, CEC_RX_DATA3_0 + i * 4);
+ cec->rx_msg.msg[i * 4] = val & 0xff;
+ cec->rx_msg.msg[i * 4 + 1] = (val >> 8) & 0xff;
+ cec->rx_msg.msg[i * 4 + 2] = (val >> 16) & 0xff;
+ cec->rx_msg.msg[i * 4 + 3] = (val >> 24) & 0xff;
+ }
+
+ dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL);
+
+ cec->rx_msg.len = len;
+ cec->rx_done = true;
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t dw_hdmi_qp_cec_thread(int irq, void *dev_id)
+{
+ struct dw_hdmi_qp *hdmi = dev_id;
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+
+ if (cec->tx_done) {
+ cec->tx_done = false;
+ drm_connector_hdmi_cec_transmit_attempt_done(cec->connector,
+ cec->tx_status);
+ }
+
+ if (cec->rx_done) {
+ cec->rx_done = false;
+ drm_connector_hdmi_cec_received_msg(cec->connector, &cec->rx_msg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dw_hdmi_qp_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+
+ cec->connector = connector;
+
+ dw_hdmi_qp_write(hdmi, 0, CEC_TX_COUNT);
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+ dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N);
+
+ return devm_request_threaded_irq(hdmi->dev, cec->irq,
+ dw_hdmi_qp_cec_hardirq,
+ dw_hdmi_qp_cec_thread, IRQF_SHARED,
+ dev_name(hdmi->dev), hdmi);
+}
+
+static int dw_hdmi_qp_cec_log_addr(struct drm_bridge *bridge, u8 logical_addr)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ cec->addresses = 0;
+ else
+ cec->addresses |= BIT(logical_addr) | CEC_ADDR_BROADCAST;
+
+ dw_hdmi_qp_write(hdmi, cec->addresses, CEC_ADDR);
+
+ return 0;
+}
+
+static int dw_hdmi_qp_cec_enable(struct drm_bridge *bridge, bool enable)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ unsigned int irqs;
+ u32 swdisable;
+
+ if (!enable) {
+ dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N);
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+
+ swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE);
+ swdisable = swdisable | CEC_SWDISABLE;
+ dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE);
+ } else {
+ swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE);
+ swdisable = swdisable & ~CEC_SWDISABLE;
+ dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE);
+
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+ dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL);
+
+ dw_hdmi_qp_cec_log_addr(bridge, CEC_LOG_ADDR_INVALID);
+
+ irqs = CEC_STAT_LINE_ERR | CEC_STAT_NACK | CEC_STAT_EOM |
+ CEC_STAT_DONE;
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+ dw_hdmi_qp_write(hdmi, irqs, CEC_INT_MASK_N);
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_qp_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ unsigned int i;
+ u32 val;
+
+ for (i = 0; i < msg->len; i++) {
+ if (!(i % 4))
+ val = msg->msg[i];
+ if ((i % 4) == 1)
+ val |= msg->msg[i] << 8;
+ if ((i % 4) == 2)
+ val |= msg->msg[i] << 16;
+ if ((i % 4) == 3)
+ val |= msg->msg[i] << 24;
+
+ if (i == (msg->len - 1) || (i % 4) == 3)
+ dw_hdmi_qp_write(hdmi, val, CEC_TX_DATA3_0 + (i / 4) * 4);
+ }
+
+ dw_hdmi_qp_write(hdmi, msg->len - 1, CEC_TX_COUNT);
+ dw_hdmi_qp_write(hdmi, CEC_CTRL_START, CEC_TX_CONTROL);
+
+ return 0;
+}
+#else
+#define dw_hdmi_qp_cec_init NULL
+#define dw_hdmi_qp_cec_enable NULL
+#define dw_hdmi_qp_cec_log_addr NULL
+#define dw_hdmi_qp_cec_transmit NULL
+#endif /* CONFIG_DRM_DW_HDMI_QP_CEC */
+
static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -979,6 +1172,10 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.hdmi_audio_startup = dw_hdmi_qp_audio_enable,
.hdmi_audio_shutdown = dw_hdmi_qp_audio_disable,
.hdmi_audio_prepare = dw_hdmi_qp_audio_prepare,
+ .hdmi_cec_init = dw_hdmi_qp_cec_init,
+ .hdmi_cec_enable = dw_hdmi_qp_cec_enable,
+ .hdmi_cec_log_addr = dw_hdmi_qp_cec_log_addr,
+ .hdmi_cec_transmit = dw_hdmi_qp_cec_transmit,
};
static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id)
@@ -1014,13 +1211,11 @@ static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi)
{
dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N);
dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N);
- dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0);
+ dw_hdmi_qp_write(hdmi, hdmi->ref_clk_rate, TIMER_BASE_CONFIG0);
/* Software reset */
dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
-
dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0);
-
dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0);
/* Clear DONE and ERROR interrupts */
@@ -1066,6 +1261,13 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->phy.ops = plat_data->phy_ops;
hdmi->phy.data = plat_data->phy_data;
+ if (plat_data->ref_clk_rate) {
+ hdmi->ref_clk_rate = plat_data->ref_clk_rate;
+ } else {
+ hdmi->ref_clk_rate = 428571429;
+ dev_warn(dev, "Set ref_clk_rate to vendor default\n");
+ }
+
dw_hdmi_qp_init_hw(hdmi);
ret = devm_request_threaded_irq(dev, plat_data->main_irq,
@@ -1093,6 +1295,22 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->bridge.hdmi_audio_dev = dev;
hdmi->bridge.hdmi_audio_dai_port = 1;
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+ if (plat_data->cec_irq) {
+ hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
+ hdmi->bridge.hdmi_cec_dev = dev;
+ hdmi->bridge.hdmi_cec_adapter_name = dev_name(dev);
+
+ hdmi->cec = devm_kzalloc(hdmi->dev, sizeof(*hdmi->cec), GFP_KERNEL);
+ if (!hdmi->cec)
+ return ERR_PTR(-ENOMEM);
+
+ hdmi->cec->irq = plat_data->cec_irq;
+ } else {
+ dev_warn(dev, "Disabled CEC support due to missing IRQ\n");
+ }
+#endif
+
ret = devm_drm_bridge_add(dev, &hdmi->bridge);
if (ret)
return ERR_PTR(ret);
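
The transmit hook above packs the CEC message bytes little-endian into the 32-bit CEC_TX_DATA registers, four bytes per register, flushing a word whenever it fills up or the message ends. A standalone sketch of that packing scheme, not part of the patch and only echoing the register name from the driver:

#include <stdint.h>
#include <stdio.h>

static void pack_cec_msg(const uint8_t *msg, unsigned int len, uint32_t *words)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < len; i++) {
		/* Byte i lands in bits [8*(i%4)+7 : 8*(i%4)] of word i/4 */
		val |= (uint32_t)msg[i] << (8 * (i % 4));

		/* Flush a completed word, or the final partial word */
		if ((i % 4) == 3 || i == len - 1) {
			words[i / 4] = val;
			val = 0;
		}
	}
}

int main(void)
{
	/* <Standby> broadcast from logical address 4: header 0x4f, opcode 0x36 */
	const uint8_t msg[] = { 0x4f, 0x36 };
	uint32_t words[4] = { 0 };

	pack_cec_msg(msg, sizeof(msg), words);
	printf("CEC_TX_DATA3_0 <- 0x%08x (expect 0x0000364f)\n", words[0]);
	return 0;
}
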
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
index 72987e6c4689..91a15f82e32a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
@@ -488,9 +488,23 @@
#define AUDPKT_VBIT_OVR0 0xf24
/* CEC Registers */
#define CEC_TX_CONTROL 0x1000
+#define CEC_CTRL_CLEAR BIT(0)
+#define CEC_CTRL_START BIT(0)
#define CEC_STATUS 0x1004
+#define CEC_STAT_DONE BIT(0)
+#define CEC_STAT_NACK BIT(1)
+#define CEC_STAT_ARBLOST BIT(2)
+#define CEC_STAT_LINE_ERR BIT(3)
+#define CEC_STAT_RETRANS_FAIL BIT(4)
+#define CEC_STAT_DISCARD BIT(5)
+#define CEC_STAT_TX_BUSY BIT(8)
+#define CEC_STAT_RX_BUSY BIT(9)
+#define CEC_STAT_DRIVE_ERR BIT(10)
+#define CEC_STAT_EOM BIT(11)
+#define CEC_STAT_NOTIFY_ERR BIT(12)
#define CEC_CONFIG 0x1008
#define CEC_ADDR 0x100c
+#define CEC_ADDR_BROADCAST BIT(15)
#define CEC_TX_COUNT 0x1020
#define CEC_TX_DATA3_0 0x1024
#define CEC_TX_DATA7_4 0x1028
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 206b099a35e9..3b77e73ac0ea 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -177,6 +177,7 @@ struct dw_hdmi {
spinlock_t audio_lock;
struct mutex audio_mutex;
+ unsigned int sample_iec958;
unsigned int sample_non_pcm;
unsigned int sample_width;
unsigned int sample_rate;
@@ -198,6 +199,12 @@ struct dw_hdmi {
enum drm_connector_status last_connector_result;
};
+const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi)
+{
+ return hdmi->plat_data;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_to_plat_data);
+
#define HDMI_IH_PHY_STAT0_RX_SENSE \
(HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \
HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3)
@@ -712,6 +719,14 @@ void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm)
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm);
+void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958)
+{
+ mutex_lock(&hdmi->audio_mutex);
+ hdmi->sample_iec958 = iec958;
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_iec958);
+
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
{
mutex_lock(&hdmi->audio_mutex);
@@ -843,7 +858,8 @@ static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi)
hdmi->channels,
hdmi->sample_width,
hdmi->sample_rate,
- hdmi->sample_non_pcm);
+ hdmi->sample_non_pcm,
+ hdmi->sample_iec958);
}
static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi)
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index ae0d08e5e960..276d05d25ad8 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -106,10 +106,21 @@
#define SN_PWM_EN_INV_REG 0xA5
#define SN_PWM_INV_MASK BIT(0)
#define SN_PWM_EN_MASK BIT(1)
+
+#define SN_IRQ_EN_REG 0xE0
+#define IRQ_EN BIT(0)
+
+#define SN_IRQ_EVENTS_EN_REG 0xE6
+#define HPD_INSERTION_EN BIT(1)
+#define HPD_REMOVAL_EN BIT(2)
+
#define SN_AUX_CMD_STATUS_REG 0xF4
#define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3)
#define AUX_IRQ_STATUS_AUX_SHORT BIT(5)
#define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6)
+#define SN_IRQ_STATUS_REG 0xF5
+#define HPD_REMOVAL_STATUS BIT(2)
+#define HPD_INSERTION_STATUS BIT(1)
#define MIN_DSI_CLK_FREQ_MHZ 40
@@ -152,7 +163,9 @@
* @ln_assign: Value to program to the LN_ASSIGN register.
* @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG.
* @comms_enabled: If true then communication over the aux channel is enabled.
+ * @hpd_enabled: If true then HPD events are enabled.
* @comms_mutex: Protects modification of comms_enabled.
+ * @hpd_mutex: Protects modification of hpd_enabled.
*
* @gchip: If we expose our GPIOs, this is used.
* @gchip_output: A cache of whether we've set GPIOs to output. This
@@ -190,7 +203,9 @@ struct ti_sn65dsi86 {
u8 ln_assign;
u8 ln_polrs;
bool comms_enabled;
+ bool hpd_enabled;
struct mutex comms_mutex;
+ struct mutex hpd_mutex;
#if defined(CONFIG_OF_GPIO)
struct gpio_chip gchip;
@@ -221,6 +236,23 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = {
.max_register = 0xFF,
};
+static int ti_sn65dsi86_read_u8(struct ti_sn65dsi86 *pdata, unsigned int reg,
+ u8 *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+ ret = regmap_read(pdata->regmap, reg, &reg_val);
+ if (ret) {
+ dev_err(pdata->dev, "fail to read raw reg %#x: %d\n",
+ reg, ret);
+ return ret;
+ }
+ *val = (u8)reg_val;
+
+ return 0;
+}
+
static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata,
unsigned int reg, u16 *val)
{
@@ -379,6 +411,7 @@ static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata)
static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
int ret;
ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
@@ -413,6 +446,13 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
if (pdata->refclk)
ti_sn65dsi86_enable_comms(pdata, NULL);
+ if (client->irq) {
+ ret = regmap_update_bits(pdata->regmap, SN_IRQ_EN_REG, IRQ_EN,
+ IRQ_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to enable IRQ events: %d\n", ret);
+ }
+
return ret;
}
@@ -1211,6 +1251,8 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *
static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
+ int ret;
/*
* Device needs to be powered on before reading the HPD state
@@ -1219,11 +1261,35 @@ static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
*/
pm_runtime_get_sync(pdata->dev);
+
+ mutex_lock(&pdata->hpd_mutex);
+ pdata->hpd_enabled = true;
+ mutex_unlock(&pdata->hpd_mutex);
+
+ if (client->irq) {
+ ret = regmap_set_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
+ HPD_REMOVAL_EN | HPD_INSERTION_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to enable HPD events: %d\n", ret);
+ }
}
static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
+ int ret;
+
+ if (client->irq) {
+ ret = regmap_clear_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
+ HPD_REMOVAL_EN | HPD_INSERTION_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to disable HPD events: %d\n", ret);
+ }
+
+ mutex_lock(&pdata->hpd_mutex);
+ pdata->hpd_enabled = false;
+ mutex_unlock(&pdata->hpd_mutex);
pm_runtime_put_autosuspend(pdata->dev);
}
@@ -1309,6 +1375,41 @@ static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata)
return 0;
}
+static irqreturn_t ti_sn_bridge_interrupt(int irq, void *private)
+{
+ struct ti_sn65dsi86 *pdata = private;
+ struct drm_device *dev = pdata->bridge.dev;
+ u8 status;
+ int ret;
+ bool hpd_event;
+
+ ret = ti_sn65dsi86_read_u8(pdata, SN_IRQ_STATUS_REG, &status);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to read IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ hpd_event = status & (HPD_REMOVAL_STATUS | HPD_INSERTION_STATUS);
+
+ dev_dbg(pdata->dev, "(SN_IRQ_STATUS_REG = %#x)\n", status);
+ if (!status)
+ return IRQ_NONE;
+
+ ret = regmap_write(pdata->regmap, SN_IRQ_STATUS_REG, status);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to clear IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* Only send the HPD event if we are bound to a device. */
+ mutex_lock(&pdata->hpd_mutex);
+ if (pdata->hpd_enabled && hpd_event)
+ drm_kms_helper_hotplug_event(dev);
+ mutex_unlock(&pdata->hpd_mutex);
+
+ return IRQ_HANDLED;
+}
+
static int ti_sn_bridge_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -1931,6 +2032,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
dev_set_drvdata(dev, pdata);
pdata->dev = dev;
+ mutex_init(&pdata->hpd_mutex);
mutex_init(&pdata->comms_mutex);
pdata->regmap = devm_regmap_init_i2c(client,
@@ -1971,6 +2073,16 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");
+ if (client->irq) {
+ ret = devm_request_threaded_irq(pdata->dev, client->irq, NULL,
+ ti_sn_bridge_interrupt,
+ IRQF_ONESHOT,
+ dev_name(pdata->dev), pdata);
+
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request interrupt\n");
+ }
+
/*
* Break ourselves up into a collection of aux devices. The only real
* motivation here is to solve the chicken-and-egg problem of probe
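
The new ti_sn_bridge_interrupt() handler reads SN_IRQ_STATUS_REG, returns IRQ_NONE when nothing is latched, writes the status back to clear it, and forwards a hotplug event only while hpd_enabled is set. A standalone sketch of that decode-and-clear flow, not part of the patch; the hardware's write-back clear is simulated by zeroing a plain variable:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HPD_INSERTION_STATUS	(1u << 1)
#define HPD_REMOVAL_STATUS	(1u << 2)

enum irq_result { IRQ_NONE, IRQ_HANDLED };

/* 'status_reg' stands in for the bridge's latched interrupt status register. */
static enum irq_result handle_irq(uint8_t *status_reg, bool hpd_enabled,
				  bool *hotplug_out)
{
	uint8_t status = *status_reg;

	if (!status)
		return IRQ_NONE;	/* nothing latched: not our interrupt */

	*status_reg = 0;		/* simulate the write-back that clears the bits */

	*hotplug_out = hpd_enabled &&
		       (status & (HPD_INSERTION_STATUS | HPD_REMOVAL_STATUS));
	return IRQ_HANDLED;
}

int main(void)
{
	uint8_t status_reg = HPD_INSERTION_STATUS;
	bool hotplug = false;

	handle_irq(&status_reg, true, &hotplug);
	printf("hotplug event: %s, status now %#x\n",
	       hotplug ? "yes" : "no", (unsigned int)status_reg);
	return 0;
}
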
diff --git a/drivers/gpu/drm/clients/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c
index f894ba52bdb5..ec5ab9f30547 100644
--- a/drivers/gpu/drm/clients/drm_fbdev_client.c
+++ b/drivers/gpu/drm/clients/drm_fbdev_client.c
@@ -62,26 +62,20 @@ err_drm_err:
return ret;
}
-static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_fbdev_client_suspend(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- if (holds_console_lock)
- drm_fb_helper_set_suspend(fb_helper, true);
- else
- drm_fb_helper_set_suspend_unlocked(fb_helper, true);
+ drm_fb_helper_set_suspend_unlocked(fb_helper, true);
return 0;
}
-static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_fbdev_client_resume(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- if (holds_console_lock)
- drm_fb_helper_set_suspend(fb_helper, false);
- else
- drm_fb_helper_set_suspend_unlocked(fb_helper, false);
+ drm_fb_helper_set_suspend_unlocked(fb_helper, false);
return 0;
}
diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c
index d239f1e3c456..fd8556dd58ed 100644
--- a/drivers/gpu/drm/clients/drm_log.c
+++ b/drivers/gpu/drm/clients/drm_log.c
@@ -319,7 +319,7 @@ static int drm_log_client_hotplug(struct drm_client_dev *client)
return 0;
}
-static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock)
+static int drm_log_client_suspend(struct drm_client_dev *client)
{
struct drm_log *dlog = client_to_drm_log(client);
@@ -328,7 +328,7 @@ static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_l
return 0;
}
-static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock)
+static int drm_log_client_resume(struct drm_client_dev *client)
{
struct drm_log *dlog = client_to_drm_log(client);
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index baacd21e7341..7b18be3ff9a3 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -137,10 +137,9 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
- struct drm_bridge *bridge;
/* Notify all bridges in the pipeline of hotplug events. */
- drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) {
if (bridge->funcs->hpd_notify)
bridge->funcs->hpd_notify(bridge, status);
}
@@ -619,6 +618,20 @@ static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_f
* Bridge Connector Initialisation
*/
+static void drm_bridge_connector_put_bridges(struct drm_device *dev, void *data)
+{
+ struct drm_bridge_connector *bridge_connector = (struct drm_bridge_connector *)data;
+
+ drm_bridge_put(bridge_connector->bridge_edid);
+ drm_bridge_put(bridge_connector->bridge_hpd);
+ drm_bridge_put(bridge_connector->bridge_detect);
+ drm_bridge_put(bridge_connector->bridge_modes);
+ drm_bridge_put(bridge_connector->bridge_hdmi);
+ drm_bridge_put(bridge_connector->bridge_hdmi_audio);
+ drm_bridge_put(bridge_connector->bridge_dp_audio);
+ drm_bridge_put(bridge_connector->bridge_hdmi_cec);
+}
+
/**
* drm_bridge_connector_init - Initialise a connector for a chain of bridges
* @drm: the DRM device
@@ -639,7 +652,15 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_bridge_connector *bridge_connector;
struct drm_connector *connector;
struct i2c_adapter *ddc = NULL;
- struct drm_bridge *bridge, *panel_bridge = NULL;
+ struct drm_bridge *panel_bridge __free(drm_bridge_put) = NULL;
+ struct drm_bridge *bridge_edid __free(drm_bridge_put) = NULL;
+ struct drm_bridge *bridge_hpd __free(drm_bridge_put) = NULL;
+ struct drm_bridge *bridge_detect __free(drm_bridge_put) = NULL;
+ struct drm_bridge *bridge_modes __free(drm_bridge_put) = NULL;
+ struct drm_bridge *bridge_hdmi __free(drm_bridge_put) = NULL;
+ struct drm_bridge *bridge_hdmi_audio __free(drm_bridge_put) = NULL;
+ struct drm_bridge *bridge_dp_audio __free(drm_bridge_put) = NULL;
+ struct drm_bridge *bridge_hdmi_cec __free(drm_bridge_put) = NULL;
unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB);
unsigned int max_bpc = 8;
bool support_hdcp = false;
@@ -650,6 +671,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (!bridge_connector)
return ERR_PTR(-ENOMEM);
+ ret = drmm_add_action(drm, drm_bridge_connector_put_bridges, bridge_connector);
+ if (ret)
+ return ERR_PTR(ret);
+
bridge_connector->encoder = encoder;
/*
@@ -667,28 +692,36 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
* detection are available, we don't support hotplug detection at all.
*/
connector_type = DRM_MODE_CONNECTOR_Unknown;
- drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
if (!bridge->interlace_allowed)
connector->interlace_allowed = false;
if (!bridge->ycbcr_420_allowed)
connector->ycbcr_420_allowed = false;
- if (bridge->ops & DRM_BRIDGE_OP_EDID)
- bridge_connector->bridge_edid = bridge;
- if (bridge->ops & DRM_BRIDGE_OP_HPD)
- bridge_connector->bridge_hpd = bridge;
- if (bridge->ops & DRM_BRIDGE_OP_DETECT)
- bridge_connector->bridge_detect = bridge;
- if (bridge->ops & DRM_BRIDGE_OP_MODES)
- bridge_connector->bridge_modes = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_EDID) {
+ drm_bridge_put(bridge_edid);
+ bridge_edid = drm_bridge_get(bridge);
+ }
+ if (bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_put(bridge_hpd);
+ bridge_hpd = drm_bridge_get(bridge);
+ }
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT) {
+ drm_bridge_put(bridge_detect);
+ bridge_detect = drm_bridge_get(bridge);
+ }
+ if (bridge->ops & DRM_BRIDGE_OP_MODES) {
+ drm_bridge_put(bridge_modes);
+ bridge_modes = drm_bridge_get(bridge);
+ }
if (bridge->ops & DRM_BRIDGE_OP_HDMI) {
- if (bridge_connector->bridge_hdmi)
+ if (bridge_hdmi)
return ERR_PTR(-EBUSY);
if (!bridge->funcs->hdmi_write_infoframe ||
!bridge->funcs->hdmi_clear_infoframe)
return ERR_PTR(-EINVAL);
- bridge_connector->bridge_hdmi = bridge;
+ bridge_hdmi = drm_bridge_get(bridge);
if (bridge->supported_formats)
supported_formats = bridge->supported_formats;
@@ -697,10 +730,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
}
if (bridge->ops & DRM_BRIDGE_OP_HDMI_AUDIO) {
- if (bridge_connector->bridge_hdmi_audio)
+ if (bridge_hdmi_audio)
return ERR_PTR(-EBUSY);
- if (bridge_connector->bridge_dp_audio)
+ if (bridge_dp_audio)
return ERR_PTR(-EBUSY);
if (!bridge->hdmi_audio_max_i2s_playback_channels &&
@@ -711,14 +744,14 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
!bridge->funcs->hdmi_audio_shutdown)
return ERR_PTR(-EINVAL);
- bridge_connector->bridge_hdmi_audio = bridge;
+ bridge_hdmi_audio = drm_bridge_get(bridge);
}
if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) {
- if (bridge_connector->bridge_dp_audio)
+ if (bridge_dp_audio)
return ERR_PTR(-EBUSY);
- if (bridge_connector->bridge_hdmi_audio)
+ if (bridge_hdmi_audio)
return ERR_PTR(-EBUSY);
if (!bridge->hdmi_audio_max_i2s_playback_channels &&
@@ -729,7 +762,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
!bridge->funcs->dp_audio_shutdown)
return ERR_PTR(-EINVAL);
- bridge_connector->bridge_dp_audio = bridge;
+ bridge_dp_audio = drm_bridge_get(bridge);
}
if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
@@ -740,10 +773,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
}
if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
- if (bridge_connector->bridge_hdmi_cec)
+ if (bridge_hdmi_cec)
return ERR_PTR(-EBUSY);
- bridge_connector->bridge_hdmi_cec = bridge;
+ bridge_hdmi_cec = drm_bridge_get(bridge);
if (!bridge->funcs->hdmi_cec_enable ||
!bridge->funcs->hdmi_cec_log_addr ||
@@ -763,7 +796,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
ddc = bridge->ddc;
if (drm_bridge_is_panel(bridge))
- panel_bridge = bridge;
+ panel_bridge = drm_bridge_get(bridge);
if (bridge->support_hdcp)
support_hdcp = true;
@@ -772,13 +805,13 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return ERR_PTR(-EINVAL);
- if (bridge_connector->bridge_hdmi) {
+ if (bridge_hdmi) {
if (!connector->ycbcr_420_allowed)
supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
ret = drmm_connector_hdmi_init(drm, connector,
- bridge_connector->bridge_hdmi->vendor,
- bridge_connector->bridge_hdmi->product,
+ bridge_hdmi->vendor,
+ bridge_hdmi->product,
&drm_bridge_connector_funcs,
&drm_bridge_connector_hdmi_funcs,
connector_type, ddc,
@@ -794,15 +827,14 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(ret);
}
- if (bridge_connector->bridge_hdmi_audio ||
- bridge_connector->bridge_dp_audio) {
+ if (bridge_hdmi_audio || bridge_dp_audio) {
struct device *dev;
struct drm_bridge *bridge;
- if (bridge_connector->bridge_hdmi_audio)
- bridge = bridge_connector->bridge_hdmi_audio;
+ if (bridge_hdmi_audio)
+ bridge = bridge_hdmi_audio;
else
- bridge = bridge_connector->bridge_dp_audio;
+ bridge = bridge_dp_audio;
dev = bridge->hdmi_audio_dev;
@@ -816,9 +848,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(ret);
}
- if (bridge_connector->bridge_hdmi_cec &&
- bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
- bridge = bridge_connector->bridge_hdmi_cec;
+ if (bridge_hdmi_cec &&
+ bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ struct drm_bridge *bridge = bridge_hdmi_cec;
ret = drmm_connector_hdmi_cec_notifier_register(connector,
NULL,
@@ -827,9 +859,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(ret);
}
- if (bridge_connector->bridge_hdmi_cec &&
- bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
- bridge = bridge_connector->bridge_hdmi_cec;
+ if (bridge_hdmi_cec &&
+ bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ struct drm_bridge *bridge = bridge_hdmi_cec;
ret = drmm_connector_hdmi_cec_register(connector,
&drm_bridge_connector_hdmi_cec_funcs,
@@ -842,9 +874,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
- if (bridge_connector->bridge_hpd)
+ if (bridge_hpd)
connector->polled = DRM_CONNECTOR_POLL_HPD;
- else if (bridge_connector->bridge_detect)
+ else if (bridge_detect)
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
@@ -855,6 +887,15 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER))
drm_connector_attach_content_protection_property(connector, true);
+ bridge_connector->bridge_edid = drm_bridge_get(bridge_edid);
+ bridge_connector->bridge_hpd = drm_bridge_get(bridge_hpd);
+ bridge_connector->bridge_detect = drm_bridge_get(bridge_detect);
+ bridge_connector->bridge_modes = drm_bridge_get(bridge_modes);
+ bridge_connector->bridge_hdmi = drm_bridge_get(bridge_hdmi);
+ bridge_connector->bridge_hdmi_audio = drm_bridge_get(bridge_hdmi_audio);
+ bridge_connector->bridge_dp_audio = drm_bridge_get(bridge_dp_audio);
+ bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge_hdmi_cec);
+
return connector;
}
EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
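
The connector init path now holds a reference on every bridge pointer it keeps, using __free(drm_bridge_put) so the temporary references are dropped automatically when the locals go out of scope, plus a drmm action to put the stored ones at device teardown. A minimal userspace sketch of the scoped-put idea, not part of the patch, built on the compiler cleanup attribute that the kernel's __free() macro also relies on; the toy obj type stands in for struct drm_bridge:

#include <stdio.h>

struct obj {
	int refcount;
};

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->refcount++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (o)
		o->refcount--;
}

/* Called automatically when a variable marked with scoped_ref leaves scope. */
static void obj_put_cleanup(struct obj **p)
{
	obj_put(*p);
}

#define scoped_ref __attribute__((cleanup(obj_put_cleanup)))

int main(void)
{
	struct obj bridge = { .refcount = 1 };

	{
		/* Holds an extra reference only for the duration of this block. */
		struct obj *ref scoped_ref = obj_get(&bridge);

		printf("inside scope:  refcount=%d\n", ref->refcount);
	}
	printf("after scope:   refcount=%d\n", bridge.refcount);
	return 0;
}
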
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 4aaeae4fa03c..49803528023b 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -123,6 +124,14 @@ bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS;
+}
+EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress);
+
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
@@ -4128,22 +4137,61 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
{
int fxp, fxp_min, fxp_max, fxp_actual, f = 1;
int ret;
- u8 pn, pn_min, pn_max;
+ u8 pn, pn_min, pn_max, bit_count;
if (!bl->aux_set)
return 0;
- ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &bit_count);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
aux->name, ret);
return -ENODEV;
}
- pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ bit_count &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
+ aux->name, ret);
+ return -ENODEV;
+ }
+ pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
+ aux->name, ret);
+ return -ENODEV;
+ }
+ pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ if (unlikely(pn_min > pn_max)) {
+ drm_dbg_kms(aux->drm_dev, "%s: Invalid pwmgen bit count cap min/max returned: %d %d\n",
+ aux->name, pn_min, pn_max);
+ return -EINVAL;
+ }
+
+ /*
+ * Per VESA eDP Spec v1.4b, section 3.3.10.2:
+ * If DP_EDP_PWMGEN_BIT_COUNT is less than DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN,
+ * the sink must use the MIN value as the effective PWM bit count.
+ * Clamp the reported value to the [MIN, MAX] capability range to ensure
+ * correct brightness scaling on compliant eDP panels.
+ * Only apply this clamping if the [MIN, MAX] range is valid per the spec.
+ */
+ pn = bit_count;
+ if (bit_count < pn_min)
+ pn = clamp(bit_count, pn_min, pn_max);
+
bl->max = (1 << pn) - 1;
- if (!driver_pwm_freq_hz)
+ if (!driver_pwm_freq_hz) {
+ if (pn != bit_count)
+ goto bit_count_write_back;
+
return 0;
+ }
/*
* Set PWM Frequency divider to match desired frequency provided by the driver.
@@ -4167,21 +4215,6 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
* - FxP is within 25% of desired value.
* Note: 25% is arbitrary value and may need some tweak.
*/
- ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
- if (ret < 0) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
- aux->name, ret);
- return 0;
- }
- ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
- if (ret < 0) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
- aux->name, ret);
- return 0;
- }
- pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
- pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
-
/* Ensure frequency is within 25% of desired value */
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
@@ -4199,12 +4232,17 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
break;
}
+bit_count_write_back:
ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
return 0;
}
+
+ if (!driver_pwm_freq_hz)
+ return 0;
+
bl->pwmgen_bit_count = pn;
bl->max = (1 << pn) - 1;
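
The reworked probe now reads the PWMGEN bit count capability range up front and clamps the reported bit count into [MIN, MAX] before deriving the maximum brightness, so a sink that reports a value below its own MIN still scales correctly. A standalone sketch of that clamping and the resulting bl->max computation, not part of the patch; the sample values are made up:

#include <stdint.h>
#include <stdio.h>

static uint8_t clamp_u8(uint8_t v, uint8_t lo, uint8_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint8_t bit_count = 8;			/* value read from DP_EDP_PWMGEN_BIT_COUNT */
	uint8_t pn_min = 11, pn_max = 16;	/* sink capability range */
	uint8_t pn = bit_count;

	/* Below the MIN cap the sink behaves as if MIN were programmed. */
	if (bit_count < pn_min)
		pn = clamp_u8(bit_count, pn_min, pn_max);

	printf("effective bit count %u, max brightness %u\n",
	       pn, (1u << pn) - 1);
	return 0;
}
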
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index cd15cf52f0c9..be2cb6e43cb0 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -207,9 +207,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;
connector->funcs->atomic_destroy_state(connector,
- state->connectors[i].state);
+ state->connectors[i].state_to_destroy);
state->connectors[i].ptr = NULL;
- state->connectors[i].state = NULL;
+ state->connectors[i].state_to_destroy = NULL;
state->connectors[i].old_state = NULL;
state->connectors[i].new_state = NULL;
drm_connector_put(connector);
@@ -222,10 +222,10 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;
crtc->funcs->atomic_destroy_state(crtc,
- state->crtcs[i].state);
+ state->crtcs[i].state_to_destroy);
state->crtcs[i].ptr = NULL;
- state->crtcs[i].state = NULL;
+ state->crtcs[i].state_to_destroy = NULL;
state->crtcs[i].old_state = NULL;
state->crtcs[i].new_state = NULL;
@@ -242,9 +242,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;
plane->funcs->atomic_destroy_state(plane,
- state->planes[i].state);
+ state->planes[i].state_to_destroy);
state->planes[i].ptr = NULL;
- state->planes[i].state = NULL;
+ state->planes[i].state_to_destroy = NULL;
state->planes[i].old_state = NULL;
state->planes[i].new_state = NULL;
}
@@ -253,9 +253,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
struct drm_private_obj *obj = state->private_objs[i].ptr;
obj->funcs->atomic_destroy_state(obj,
- state->private_objs[i].state);
+ state->private_objs[i].state_to_destroy);
state->private_objs[i].ptr = NULL;
- state->private_objs[i].state = NULL;
+ state->private_objs[i].state_to_destroy = NULL;
state->private_objs[i].old_state = NULL;
state->private_objs[i].new_state = NULL;
}
@@ -349,7 +349,7 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
WARN_ON(!state->acquire_ctx);
- crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (crtc_state)
return crtc_state;
@@ -361,7 +361,7 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
if (!crtc_state)
return ERR_PTR(-ENOMEM);
- state->crtcs[index].state = crtc_state;
+ state->crtcs[index].state_to_destroy = crtc_state;
state->crtcs[index].old_state = crtc->state;
state->crtcs[index].new_state = crtc_state;
state->crtcs[index].ptr = crtc;
@@ -480,8 +480,8 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
}
if (state->crtc)
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state,
+ state->crtc);
if (writeback_job->fb && !crtc_state->active) {
drm_dbg_atomic(connector->dev,
@@ -534,7 +534,7 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
WARN_ON(plane->old_fb);
WARN_ON(plane->crtc);
- plane_state = drm_atomic_get_existing_plane_state(state, plane);
+ plane_state = drm_atomic_get_new_plane_state(state, plane);
if (plane_state)
return plane_state;
@@ -546,7 +546,7 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
if (!plane_state)
return ERR_PTR(-ENOMEM);
- state->planes[index].state = plane_state;
+ state->planes[index].state_to_destroy = plane_state;
state->planes[index].ptr = plane;
state->planes[index].old_state = plane->state;
state->planes[index].new_state = plane_state;
@@ -831,14 +831,14 @@ struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
- int index, num_objs, i, ret;
+ int index, num_objs, ret;
size_t size;
struct __drm_private_objs_state *arr;
struct drm_private_state *obj_state;
- for (i = 0; i < state->num_private_objs; i++)
- if (obj == state->private_objs[i].ptr)
- return state->private_objs[i].state;
+ obj_state = drm_atomic_get_new_private_obj_state(state, obj);
+ if (obj_state)
+ return obj_state;
ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
if (ret)
@@ -858,7 +858,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
if (!obj_state)
return ERR_PTR(-ENOMEM);
- state->private_objs[index].state = obj_state;
+ state->private_objs[index].state_to_destroy = obj_state;
state->private_objs[index].old_state = obj->state;
state->private_objs[index].new_state = obj_state;
state->private_objs[index].ptr = obj;
@@ -1152,15 +1152,16 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
state->num_connector = alloc;
}
- if (state->connectors[index].state)
- return state->connectors[index].state;
+ connector_state = drm_atomic_get_new_connector_state(state, connector);
+ if (connector_state)
+ return connector_state;
connector_state = connector->funcs->atomic_duplicate_state(connector);
if (!connector_state)
return ERR_PTR(-ENOMEM);
drm_connector_get(connector);
- state->connectors[index].state = connector_state;
+ state->connectors[index].state_to_destroy = connector_state;
state->connectors[index].old_state = connector->state;
state->connectors[index].new_state = connector_state;
state->connectors[index].ptr = connector;
@@ -1308,7 +1309,6 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_bridge_state *bridge_state;
- struct drm_bridge *bridge;
if (!encoder)
return 0;
@@ -1317,7 +1317,7 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
"Adding all bridges for [encoder:%d:%s] to %p\n",
encoder->base.id, encoder->name, state);
- drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
/* Skip bridges that don't implement the atomic state hooks. */
if (!bridge->funcs->atomic_duplicate_state)
continue;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index d5ebe6ea0acb..5a473a274ff0 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3236,7 +3236,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_conn_state->state = state;
new_conn_state->state = NULL;
- state->connectors[i].state = old_conn_state;
+ state->connectors[i].state_to_destroy = old_conn_state;
connector->state = new_conn_state;
}
@@ -3246,7 +3246,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_crtc_state->state = state;
new_crtc_state->state = NULL;
- state->crtcs[i].state = old_crtc_state;
+ state->crtcs[i].state_to_destroy = old_crtc_state;
crtc->state = new_crtc_state;
if (new_crtc_state->commit) {
@@ -3266,7 +3266,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_plane_state->state = state;
new_plane_state->state = NULL;
- state->planes[i].state = old_plane_state;
+ state->planes[i].state_to_destroy = old_plane_state;
plane->state = new_plane_state;
}
drm_panic_unlock(state->dev, flags);
@@ -3277,7 +3277,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_obj_state->state = state;
new_obj_state->state = NULL;
- state->private_objs[i].state = old_obj_state;
+ state->private_objs[i].state_to_destroy = old_obj_state;
obj->state = new_obj_state;
}
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index d031447eebc9..53e7ece36dd9 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -197,15 +197,22 @@
* driver.
*/
+/* Protect bridge_list and bridge_lingering_list */
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
+static LIST_HEAD(bridge_lingering_list);
static void __drm_bridge_free(struct kref *kref)
{
struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+ mutex_lock(&bridge_lock);
+ list_del(&bridge->list);
+ mutex_unlock(&bridge_lock);
+
if (bridge->funcs->destroy)
bridge->funcs->destroy(bridge);
+
kfree(bridge->container);
}
@@ -273,6 +280,7 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
return ERR_PTR(-ENOMEM);
bridge = container + offset;
+ INIT_LIST_HEAD(&bridge->list);
bridge->container = container;
bridge->funcs = funcs;
kref_init(&bridge->refcount);
@@ -286,10 +294,13 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
EXPORT_SYMBOL(__devm_drm_bridge_alloc);
/**
- * drm_bridge_add - add the given bridge to the global bridge list
+ * drm_bridge_add - register a bridge
*
* @bridge: bridge control structure
*
+ * Add the given bridge to the global list of bridges, where they can be
+ * found by users via of_drm_find_bridge().
+ *
* The bridge to be added must have been allocated by
* devm_drm_bridge_alloc().
*/
@@ -300,6 +311,14 @@ void drm_bridge_add(struct drm_bridge *bridge)
drm_bridge_get(bridge);
+ /*
+ * If the bridge was previously added and then removed, it is now
+ * in bridge_lingering_list. Remove it or bridge_lingering_list will be
+ * corrupted when adding this bridge to bridge_list below.
+ */
+ if (!list_empty(&bridge->list))
+ list_del_init(&bridge->list);
+
mutex_init(&bridge->hpd_mutex);
if (bridge->ops & DRM_BRIDGE_OP_HDMI)
@@ -336,14 +355,19 @@ int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
EXPORT_SYMBOL(devm_drm_bridge_add);
/**
- * drm_bridge_remove - remove the given bridge from the global bridge list
+ * drm_bridge_remove - unregister a bridge
*
* @bridge: bridge control structure
+ *
+ * Remove the given bridge from the global list of registered bridges, so
+ * it won't be found by users via of_drm_find_bridge(), and add it to the
+ * lingering bridge list, to keep track of it until its allocated memory is
+ * eventually freed.
*/
void drm_bridge_remove(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
- list_del_init(&bridge->list);
+ list_move_tail(&bridge->list, &bridge_lingering_list);
mutex_unlock(&bridge_lock);
mutex_destroy(&bridge->hpd_mutex);
@@ -1062,12 +1086,12 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
struct drm_encoder *encoder = bridge->encoder;
struct drm_bridge_state *last_bridge_state;
unsigned int i, num_out_bus_fmts = 0;
- struct drm_bridge *last_bridge;
u32 *out_bus_fmts;
int ret = 0;
- last_bridge = list_last_entry(&encoder->bridge_chain,
- struct drm_bridge, chain_node);
+ struct drm_bridge *last_bridge __free(drm_bridge_put) =
+ drm_bridge_get(list_last_entry(&encoder->bridge_chain,
+ struct drm_bridge, chain_node));
last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
last_bridge);
@@ -1121,7 +1145,6 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct drm_bridge_state *bridge_state, *next_bridge_state;
- struct drm_bridge *next_bridge;
u32 output_flags = 0;
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
@@ -1130,7 +1153,7 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
if (!bridge_state)
return;
- next_bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
/*
* Let's try to apply the most common case here, that is, propagate
@@ -1432,17 +1455,20 @@ EXPORT_SYMBOL(devm_drm_put_bridge);
static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
struct drm_bridge *bridge,
- unsigned int idx)
+ unsigned int idx,
+ bool lingering)
{
drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
- drm_printf(p, "\trefcount: %u\n", kref_read(&bridge->refcount));
+ drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount),
+ lingering ? " [lingering]" : "");
drm_printf(p, "\ttype: [%d] %s\n",
bridge->type,
drm_get_connector_type_name(bridge->type));
- if (bridge->of_node)
+ /* The OF node could be freed after drm_bridge_remove() */
+ if (bridge->of_node && !lingering)
drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
drm_printf(p, "\tops: [0x%x]", bridge->ops);
@@ -1468,7 +1494,10 @@ static int allbridges_show(struct seq_file *m, void *data)
mutex_lock(&bridge_lock);
list_for_each_entry(bridge, &bridge_list, list)
- drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
+
+ list_for_each_entry(bridge, &bridge_lingering_list, list)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true);
mutex_unlock(&bridge_lock);
@@ -1480,11 +1509,10 @@ static int encoder_bridges_show(struct seq_file *m, void *data)
{
struct drm_encoder *encoder = m->private;
struct drm_printer p = drm_seq_file_printer(m);
- struct drm_bridge *bridge;
unsigned int idx = 0;
- drm_for_each_bridge_in_chain(encoder, bridge)
- drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
return 0;
}
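
drm_bridge_remove() now moves the bridge onto a lingering list instead of dropping it from the global list, and drm_bridge_add() has to unlink it again before re-registering, otherwise the list heads would be corrupted. A standalone sketch of that registered/lingering lifecycle, not part of the patch, with a minimal circular intrusive list standing in for <linux/list.h>:

#include <stdio.h>

struct node {
	struct node *prev, *next;
	const char *name;
};

static void list_init(struct node *n)
{
	n->prev = n->next = n;
}

static int list_empty(const struct node *n)
{
	return n->next == n;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

static void list_move_tail(struct node *n, struct node *head)
{
	list_del_init(n);
	list_add_tail(n, head);
}

int main(void)
{
	struct node registered, lingering, bridge = { .name = "bridge0" };

	list_init(&registered);
	list_init(&lingering);
	list_init(&bridge);

	/* add: leave any previous (lingering) list before registering */
	if (!list_empty(&bridge))
		list_del_init(&bridge);
	list_add_tail(&bridge, &registered);

	/* remove: keep tracking the bridge until its memory is freed */
	list_move_tail(&bridge, &lingering);

	printf("registered empty: %d, lingering empty: %d\n",
	       list_empty(&registered), list_empty(&lingering));
	return 0;
}
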
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index a94061f373de..f2c92902e4a3 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -12,8 +12,17 @@
#include <drm/drm_buddy.h>
+enum drm_buddy_free_tree {
+ DRM_BUDDY_CLEAR_TREE = 0,
+ DRM_BUDDY_DIRTY_TREE,
+ DRM_BUDDY_MAX_FREE_TREES,
+};
+
static struct kmem_cache *slab_blocks;
+#define for_each_free_tree(tree) \
+ for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++)
+
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
struct drm_buddy_block *parent,
unsigned int order,
@@ -31,6 +40,8 @@ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
block->header |= order;
block->parent = parent;
+ RB_CLEAR_NODE(&block->rb);
+
BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
return block;
}
@@ -41,23 +52,64 @@ static void drm_block_free(struct drm_buddy *mm,
kmem_cache_free(slab_blocks, block);
}
-static void list_insert_sorted(struct drm_buddy *mm,
- struct drm_buddy_block *block)
+static enum drm_buddy_free_tree
+get_block_tree(struct drm_buddy_block *block)
{
- struct drm_buddy_block *node;
- struct list_head *head;
+ return drm_buddy_block_is_clear(block) ?
+ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+}
- head = &mm->free_list[drm_buddy_block_order(block)];
- if (list_empty(head)) {
- list_add(&block->link, head);
- return;
- }
+static struct drm_buddy_block *
+rbtree_get_free_block(const struct rb_node *node)
+{
+ return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
+}
- list_for_each_entry(node, head, link)
- if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
- break;
+static struct drm_buddy_block *
+rbtree_last_free_block(struct rb_root *root)
+{
+ return rbtree_get_free_block(rb_last(root));
+}
- __list_add(&block->link, node->link.prev, &node->link);
+static bool rbtree_is_empty(struct rb_root *root)
+{
+ return RB_EMPTY_ROOT(root);
+}
+
+static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block,
+ const struct drm_buddy_block *node)
+{
+ return drm_buddy_block_offset(block) < drm_buddy_block_offset(node);
+}
+
+static bool rbtree_block_offset_less(struct rb_node *block,
+ const struct rb_node *node)
+{
+ return drm_buddy_block_offset_less(rbtree_get_free_block(block),
+ rbtree_get_free_block(node));
+}
+
+static void rbtree_insert(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ enum drm_buddy_free_tree tree)
+{
+ rb_add(&block->rb,
+ &mm->free_trees[tree][drm_buddy_block_order(block)],
+ rbtree_block_offset_less);
+}
+
+static void rbtree_remove(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ unsigned int order = drm_buddy_block_order(block);
+ enum drm_buddy_free_tree tree;
+ struct rb_root *root;
+
+ tree = get_block_tree(block);
+ root = &mm->free_trees[tree][order];
+
+ rb_erase(&block->rb, root);
+ RB_CLEAR_NODE(&block->rb);
}
static void clear_reset(struct drm_buddy_block *block)
@@ -70,29 +122,34 @@ static void mark_cleared(struct drm_buddy_block *block)
block->header |= DRM_BUDDY_HEADER_CLEAR;
}
-static void mark_allocated(struct drm_buddy_block *block)
+static void mark_allocated(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_ALLOCATED;
- list_del(&block->link);
+ rbtree_remove(mm, block);
}
static void mark_free(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
+ enum drm_buddy_free_tree tree;
+
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_FREE;
- list_insert_sorted(mm, block);
+ tree = get_block_tree(block);
+ rbtree_insert(mm, block, tree);
}
-static void mark_split(struct drm_buddy_block *block)
+static void mark_split(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_SPLIT;
- list_del(&block->link);
+ rbtree_remove(mm, block);
}
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
@@ -148,7 +205,7 @@ static unsigned int __drm_buddy_free(struct drm_buddy *mm,
mark_cleared(parent);
}
- list_del(&buddy->link);
+ rbtree_remove(mm, buddy);
if (force_merge && drm_buddy_block_is_clear(buddy))
mm->clear_avail -= drm_buddy_block_size(mm, buddy);
@@ -169,7 +226,7 @@ static int __force_merge(struct drm_buddy *mm,
u64 end,
unsigned int min_order)
{
- unsigned int order;
+ unsigned int tree, order;
int i;
if (!min_order)
@@ -178,44 +235,48 @@ static int __force_merge(struct drm_buddy *mm,
if (min_order > mm->max_order)
return -EINVAL;
- for (i = min_order - 1; i >= 0; i--) {
- struct drm_buddy_block *block, *prev;
+ for_each_free_tree(tree) {
+ for (i = min_order - 1; i >= 0; i--) {
+ struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
- list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
- struct drm_buddy_block *buddy;
- u64 block_start, block_end;
+ while (iter) {
+ struct drm_buddy_block *block, *buddy;
+ u64 block_start, block_end;
- if (!block->parent)
- continue;
+ block = rbtree_get_free_block(iter);
+ iter = rb_prev(iter);
- block_start = drm_buddy_block_offset(block);
- block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+ if (!block || !block->parent)
+ continue;
- if (!contains(start, end, block_start, block_end))
- continue;
+ block_start = drm_buddy_block_offset(block);
+ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
- buddy = __get_buddy(block);
- if (!drm_buddy_block_is_free(buddy))
- continue;
+ if (!contains(start, end, block_start, block_end))
+ continue;
- WARN_ON(drm_buddy_block_is_clear(block) ==
- drm_buddy_block_is_clear(buddy));
+ buddy = __get_buddy(block);
+ if (!drm_buddy_block_is_free(buddy))
+ continue;
- /*
- * If the prev block is same as buddy, don't access the
- * block in the next iteration as we would free the
- * buddy block as part of the free function.
- */
- if (prev == buddy)
- prev = list_prev_entry(prev, link);
+ WARN_ON(drm_buddy_block_is_clear(block) ==
+ drm_buddy_block_is_clear(buddy));
- list_del(&block->link);
- if (drm_buddy_block_is_clear(block))
- mm->clear_avail -= drm_buddy_block_size(mm, block);
+ /*
+ * Advance to the next node when the current node is the buddy,
+ * as freeing the block will also remove its buddy from the tree.
+ */
+ if (iter == &buddy->rb)
+ iter = rb_prev(iter);
- order = __drm_buddy_free(mm, block, true);
- if (order >= min_order)
- return 0;
+ rbtree_remove(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+
+ order = __drm_buddy_free(mm, block, true);
+ if (order >= min_order)
+ return 0;
+ }
}
}
@@ -236,8 +297,8 @@ static int __force_merge(struct drm_buddy *mm,
*/
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
- unsigned int i;
- u64 offset;
+ unsigned int i, j, root_count = 0;
+ u64 offset = 0;
if (size < chunk_size)
return -EINVAL;
@@ -258,14 +319,22 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
- mm->free_list = kmalloc_array(mm->max_order + 1,
- sizeof(struct list_head),
- GFP_KERNEL);
- if (!mm->free_list)
+ mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES,
+ sizeof(*mm->free_trees),
+ GFP_KERNEL);
+ if (!mm->free_trees)
return -ENOMEM;
- for (i = 0; i <= mm->max_order; ++i)
- INIT_LIST_HEAD(&mm->free_list[i]);
+ for_each_free_tree(i) {
+ mm->free_trees[i] = kmalloc_array(mm->max_order + 1,
+ sizeof(struct rb_root),
+ GFP_KERNEL);
+ if (!mm->free_trees[i])
+ goto out_free_tree;
+
+ for (j = 0; j <= mm->max_order; ++j)
+ mm->free_trees[i][j] = RB_ROOT;
+ }
mm->n_roots = hweight64(size);
@@ -273,10 +342,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
sizeof(struct drm_buddy_block *),
GFP_KERNEL);
if (!mm->roots)
- goto out_free_list;
-
- offset = 0;
- i = 0;
+ goto out_free_tree;
/*
* Split into power-of-two blocks, in case we are given a size that is
@@ -296,24 +362,26 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
mark_free(mm, root);
- BUG_ON(i > mm->max_order);
+ BUG_ON(root_count > mm->max_order);
BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);
- mm->roots[i] = root;
+ mm->roots[root_count] = root;
offset += root_size;
size -= root_size;
- i++;
+ root_count++;
} while (size);
return 0;
out_free_roots:
- while (i--)
- drm_block_free(mm, mm->roots[i]);
+ while (root_count--)
+ drm_block_free(mm, mm->roots[root_count]);
kfree(mm->roots);
-out_free_list:
- kfree(mm->free_list);
+out_free_tree:
+ while (i--)
+ kfree(mm->free_trees[i]);
+ kfree(mm->free_trees);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
@@ -323,7 +391,7 @@ EXPORT_SYMBOL(drm_buddy_init);
*
* @mm: DRM buddy manager to free
*
- * Cleanup memory manager resources and the freelist
+ * Cleanup memory manager resources and the freetree
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
@@ -349,8 +417,9 @@ void drm_buddy_fini(struct drm_buddy *mm)
WARN_ON(mm->avail != mm->size);
+ for_each_free_tree(i)
+ kfree(mm->free_trees[i]);
kfree(mm->roots);
- kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);
@@ -374,8 +443,7 @@ static int split_block(struct drm_buddy *mm,
return -ENOMEM;
}
- mark_free(mm, block->left);
- mark_free(mm, block->right);
+ mark_split(mm, block);
if (drm_buddy_block_is_clear(block)) {
mark_cleared(block->left);
@@ -383,7 +451,8 @@ static int split_block(struct drm_buddy *mm,
clear_reset(block);
}
- mark_split(block);
+ mark_free(mm, block->left);
+ mark_free(mm, block->right);
return 0;
}
@@ -412,10 +481,11 @@ EXPORT_SYMBOL(drm_get_buddy);
* @is_clear: blocks clear state
*
* Reset the clear state based on @is_clear value for each block
- * in the freelist.
+ * in the freetree.
*/
void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
{
+ enum drm_buddy_free_tree src_tree, dst_tree;
u64 root_size, size, start;
unsigned int order;
int i;
@@ -430,19 +500,24 @@ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
size -= root_size;
}
+ src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
+ dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+
for (i = 0; i <= mm->max_order; ++i) {
- struct drm_buddy_block *block;
-
- list_for_each_entry_reverse(block, &mm->free_list[i], link) {
- if (is_clear != drm_buddy_block_is_clear(block)) {
- if (is_clear) {
- mark_cleared(block);
- mm->clear_avail += drm_buddy_block_size(mm, block);
- } else {
- clear_reset(block);
- mm->clear_avail -= drm_buddy_block_size(mm, block);
- }
+ struct rb_root *root = &mm->free_trees[src_tree][i];
+ struct drm_buddy_block *block, *tmp;
+
+ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
+ rbtree_remove(mm, block);
+ if (is_clear) {
+ mark_cleared(block);
+ mm->clear_avail += drm_buddy_block_size(mm, block);
+ } else {
+ clear_reset(block);
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
}
+
+ rbtree_insert(mm, block, dst_tree);
}
}
}
@@ -632,23 +707,17 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm,
}
static struct drm_buddy_block *
-get_maxblock(struct drm_buddy *mm, unsigned int order,
- unsigned long flags)
+get_maxblock(struct drm_buddy *mm,
+ unsigned int order,
+ enum drm_buddy_free_tree tree)
{
struct drm_buddy_block *max_block = NULL, *block = NULL;
+ struct rb_root *root;
unsigned int i;
for (i = order; i <= mm->max_order; ++i) {
- struct drm_buddy_block *tmp_block;
-
- list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
- if (block_incompatible(tmp_block, flags))
- continue;
-
- block = tmp_block;
- break;
- }
-
+ root = &mm->free_trees[tree][i];
+ block = rbtree_last_free_block(root);
if (!block)
continue;
@@ -667,46 +736,44 @@ get_maxblock(struct drm_buddy *mm, unsigned int order,
}
static struct drm_buddy_block *
-alloc_from_freelist(struct drm_buddy *mm,
+alloc_from_freetree(struct drm_buddy *mm,
unsigned int order,
unsigned long flags)
{
struct drm_buddy_block *block = NULL;
+ struct rb_root *root;
+ enum drm_buddy_free_tree tree;
unsigned int tmp;
int err;
+ tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ?
+ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+
if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
- block = get_maxblock(mm, order, flags);
+ block = get_maxblock(mm, order, tree);
if (block)
/* Store the obtained block order */
tmp = drm_buddy_block_order(block);
} else {
for (tmp = order; tmp <= mm->max_order; ++tmp) {
- struct drm_buddy_block *tmp_block;
-
- list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
- if (block_incompatible(tmp_block, flags))
- continue;
-
- block = tmp_block;
- break;
- }
-
+ /* Get RB tree root for this order and tree */
+ root = &mm->free_trees[tree][tmp];
+ block = rbtree_last_free_block(root);
if (block)
break;
}
}
if (!block) {
- /* Fallback method */
+ /* Try allocating from the other tree */
+ tree = (tree == DRM_BUDDY_CLEAR_TREE) ?
+ DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
+
for (tmp = order; tmp <= mm->max_order; ++tmp) {
- if (!list_empty(&mm->free_list[tmp])) {
- block = list_last_entry(&mm->free_list[tmp],
- struct drm_buddy_block,
- link);
- if (block)
- break;
- }
+ root = &mm->free_trees[tree][tmp];
+ block = rbtree_last_free_block(root);
+ if (block)
+ break;
}
if (!block)
@@ -771,7 +838,7 @@ static int __alloc_range(struct drm_buddy *mm,
if (contains(start, end, block_start, block_end)) {
if (drm_buddy_block_is_free(block)) {
- mark_allocated(block);
+ mark_allocated(mm, block);
total_allocated += drm_buddy_block_size(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
@@ -849,10 +916,9 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
{
u64 rhs_offset, lhs_offset, lhs_size, filled;
struct drm_buddy_block *block;
- struct list_head *list;
+ unsigned int tree, order;
LIST_HEAD(blocks_lhs);
unsigned long pages;
- unsigned int order;
u64 modify_size;
int err;
@@ -862,35 +928,45 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
if (order == 0)
return -ENOSPC;
- list = &mm->free_list[order];
- if (list_empty(list))
- return -ENOSPC;
+ for_each_free_tree(tree) {
+ struct rb_root *root;
+ struct rb_node *iter;
+
+ root = &mm->free_trees[tree][order];
+ if (rbtree_is_empty(root))
+ continue;
- list_for_each_entry_reverse(block, list, link) {
- /* Allocate blocks traversing RHS */
- rhs_offset = drm_buddy_block_offset(block);
- err = __drm_buddy_alloc_range(mm, rhs_offset, size,
- &filled, blocks);
- if (!err || err != -ENOSPC)
- return err;
-
- lhs_size = max((size - filled), min_block_size);
- if (!IS_ALIGNED(lhs_size, min_block_size))
- lhs_size = round_up(lhs_size, min_block_size);
-
- /* Allocate blocks traversing LHS */
- lhs_offset = drm_buddy_block_offset(block) - lhs_size;
- err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
- NULL, &blocks_lhs);
- if (!err) {
- list_splice(&blocks_lhs, blocks);
- return 0;
- } else if (err != -ENOSPC) {
+ iter = rb_last(root);
+ while (iter) {
+ block = rbtree_get_free_block(iter);
+
+ /* Allocate blocks traversing RHS */
+ rhs_offset = drm_buddy_block_offset(block);
+ err = __drm_buddy_alloc_range(mm, rhs_offset, size,
+ &filled, blocks);
+ if (!err || err != -ENOSPC)
+ return err;
+
+ lhs_size = max((size - filled), min_block_size);
+ if (!IS_ALIGNED(lhs_size, min_block_size))
+ lhs_size = round_up(lhs_size, min_block_size);
+
+ /* Allocate blocks traversing LHS */
+ lhs_offset = drm_buddy_block_offset(block) - lhs_size;
+ err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
+ NULL, &blocks_lhs);
+ if (!err) {
+ list_splice(&blocks_lhs, blocks);
+ return 0;
+ } else if (err != -ENOSPC) {
+ drm_buddy_free_list_internal(mm, blocks);
+ return err;
+ }
+ /* Free blocks for the next iteration */
drm_buddy_free_list_internal(mm, blocks);
- return err;
+
+ iter = rb_prev(iter);
}
- /* Free blocks for the next iteration */
- drm_buddy_free_list_internal(mm, blocks);
}
return -ENOSPC;
@@ -976,7 +1052,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
list_add(&block->tmp_link, &dfs);
err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
if (err) {
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
@@ -999,8 +1075,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm,
return __drm_buddy_alloc_range_bias(mm, start, end,
order, flags);
else
- /* Allocate from freelist */
- return alloc_from_freelist(mm, order, flags);
+ /* Allocate from freetree */
+ return alloc_from_freetree(mm, order, flags);
}
/**
@@ -1017,8 +1093,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm,
* alloc_range_bias() called on range limitations, which traverses
* the tree and returns the desired block.
*
- * alloc_from_freelist() called when *no* range restrictions
- * are enforced, which picks the block from the freelist.
+ * alloc_from_freetree() called when *no* range restrictions
+ * are enforced, which picks the block from the freetree.
*
* Returns:
* 0 on success, error code on failure.
@@ -1120,7 +1196,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
}
} while (1);
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
@@ -1201,12 +1277,18 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
for (order = mm->max_order; order >= 0; order--) {
- struct drm_buddy_block *block;
+ struct drm_buddy_block *block, *tmp;
+ struct rb_root *root;
u64 count = 0, free;
+ unsigned int tree;
- list_for_each_entry(block, &mm->free_list[order], link) {
- BUG_ON(!drm_buddy_block_is_free(block));
- count++;
+ for_each_free_tree(tree) {
+ root = &mm->free_trees[tree][order];
+
+ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
+ BUG_ON(!drm_buddy_block_is_free(block));
+ count++;
+ }
}
drm_printf(p, "order-%2d ", order);
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
index c83196ad8b59..c3baeb4d4e6b 100644
--- a/drivers/gpu/drm/drm_client_event.c
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -122,7 +122,7 @@ void drm_client_dev_restore(struct drm_device *dev)
mutex_unlock(&dev->clientlist_mutex);
}
-static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_client_suspend(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret = 0;
@@ -131,7 +131,7 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_
return 0;
if (client->funcs && client->funcs->suspend)
- ret = client->funcs->suspend(client, holds_console_lock);
+ ret = client->funcs->suspend(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
client->suspended = true;
@@ -139,20 +139,20 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_
return ret;
}
-void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock)
+void drm_client_dev_suspend(struct drm_device *dev)
{
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->suspended)
- drm_client_suspend(client, holds_console_lock);
+ drm_client_suspend(client);
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_suspend);
-static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_client_resume(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret = 0;
@@ -161,7 +161,7 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l
return 0;
if (client->funcs && client->funcs->resume)
- ret = client->funcs->resume(client, holds_console_lock);
+ ret = client->funcs->resume(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
client->suspended = false;
@@ -172,14 +172,14 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l
return ret;
}
-void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock)
+void drm_client_dev_resume(struct drm_device *dev)
{
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (client->suspended)
- drm_client_resume(client, holds_console_lock);
+ drm_client_resume(client);
}
mutex_unlock(&dev->clientlist_mutex);
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 9c2c3b0c8c47..fc4caf7da5fc 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -1293,6 +1293,50 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode)
}
EXPORT_SYMBOL(drm_client_modeset_dpms);
+/**
+ * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur
+ * @client: DRM client
+ * @crtc_index: The index of the CRTC to wait on
+ *
+ * Block the caller until the given CRTC has seen a VBLANK. Do nothing
+ * if the CRTC is disabled. If there's another DRM master present, fail
+ * with -EBUSY.
+ *
+ * Returns:
+ * 0 on success, or negative error code otherwise.
+ */
+int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ /*
+ * Rate-limit update frequency to vblank. If there's a DRM master
+ * present, it could interfere while we're waiting for the vblank
+ * event. Don't wait in this case.
+ */
+ if (!drm_master_internal_acquire(dev))
+ return -EBUSY;
+
+ crtc = client->modesets[crtc_index].crtc;
+
+ /*
+ * Only wait for a vblank event if the CRTC is enabled, otherwise
+ * just don't do anything, not even report an error.
+ */
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+
+ drm_master_internal_release(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank);
+
#ifdef CONFIG_DRM_KUNIT_TEST
#include "tests/drm_client_modeset_test.c"
#endif
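As a usage note, a minimal sketch of how an in-kernel client might throttle its update path with the new helper; the flush function itself is hypothetical, only drm_client_modeset_wait_for_vblank() comes from this patch:

/* Hypothetical client update path, rate-limited to the display's vblank. */
static void example_client_flush(struct drm_client_dev *client)
{
	int ret;

	/* Wait on CRTC 0; -EBUSY means another DRM master owns the device. */
	ret = drm_client_modeset_wait_for_vblank(client, 0);
	if (ret && ret != -EBUSY)
		drm_dbg_kms(client->dev, "vblank wait failed: %d\n", ret);

	/* ... perform the actual damage handling here ... */
}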
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 70032bba1c97..e9eed9a5b760 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -25,6 +25,8 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_mode.h>
@@ -57,6 +59,134 @@
* a hardware-specific ioctl to allocate suitable buffer objects.
*/
+static int drm_mode_align_dumb(struct drm_mode_create_dumb *args,
+ unsigned long hw_pitch_align,
+ unsigned long hw_size_align)
+{
+ u32 pitch = args->pitch;
+ u32 size;
+
+ if (!pitch)
+ return -EINVAL;
+
+ if (hw_pitch_align)
+ pitch = roundup(pitch, hw_pitch_align);
+
+ if (!hw_size_align)
+ hw_size_align = PAGE_SIZE;
+ else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE))
+ return -EINVAL; /* TODO: handle this if necessary */
+
+ if (check_mul_overflow(args->height, pitch, &size))
+ return -EINVAL;
+ size = ALIGN(size, hw_size_align);
+ if (!size)
+ return -EINVAL;
+
+ args->pitch = pitch;
+ args->size = size;
+
+ return 0;
+}
+
+/**
+ * drm_mode_size_dumb - Calculates the scanline and buffer sizes for dumb buffers
+ * @dev: DRM device
+ * @args: Parameters for the dumb buffer
+ * @hw_pitch_align: Hardware scanline alignment in bytes
+ * @hw_size_align: Hardware buffer-size alignment in bytes
+ *
+ * The helper drm_mode_size_dumb() calculates the size of the buffer
+ * allocation and the scanline size for a dumb buffer. Callers have to
+ * set the buffer's width, height and color mode in the argument @args.
+ * The helper validates the correctness of the input and tests for
+ * possible overflows. If successful, it returns the dumb buffer's
+ * required scanline pitch and size in @args.
+ *
+ * The parameter @hw_pitch_align allows the driver to specify an
+ * alignment for the scanline pitch, if the hardware requires any. The
+ * calculated pitch will be a multiple of the alignment. The parameter
+ * @hw_size_align allows the driver to specify an alignment for buffer sizes. The
+ * provided alignment should represent requirements of the graphics
+ * hardware. drm_mode_size_dumb() handles GEM-related constraints
+ * automatically across all drivers and hardware. For example, the
+ * returned buffer size is always a multiple of PAGE_SIZE, which is
+ * required by mmap().
+ *
+ * Returns:
+ * Zero on success, or a negative error code otherwise.
+ */
+int drm_mode_size_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ unsigned long hw_pitch_align,
+ unsigned long hw_size_align)
+{
+ u64 pitch = 0;
+ u32 fourcc;
+
+ /*
+ * The scanline pitch depends on the buffer width and the color
+ * format. The latter is specified as a color-mode constant for
+ * which we first have to find the corresponding color format.
+ *
+ * Different color formats can have the same color-mode constant.
+ * For example XRGB8888 and BGRX8888 both have a color mode of 32.
+ * It is possible to use different formats for dumb-buffer allocation
+ * and rendering as long as all involved formats share the same
+ * color-mode constant.
+ */
+ fourcc = drm_driver_color_mode_format(dev, args->bpp);
+ if (fourcc != DRM_FORMAT_INVALID) {
+ const struct drm_format_info *info = drm_format_info(fourcc);
+
+ if (!info)
+ return -EINVAL;
+ pitch = drm_format_info_min_pitch(info, 0, args->width);
+ } else if (args->bpp) {
+ /*
+ * Some userspace throws in arbitrary values for bpp and
+ * relies on the kernel to figure it out. In this case we
+ * fall back to the old method of using bpp directly. The
+ * over-commitment of memory from the rounding is acceptable
+ * for compatibility with legacy userspace. We have a number
+ * of deprecated legacy values that are explicitly supported.
+ */
+ switch (args->bpp) {
+ default:
+ drm_warn_once(dev,
+ "Unknown color mode %u; guessing buffer size.\n",
+ args->bpp);
+ fallthrough;
+ /*
+ * These constants represent various YUV formats supported by
+ * drm_gem_afbc_get_bpp().
+ */
+ case 12: // DRM_FORMAT_YUV420_8BIT
+ case 15: // DRM_FORMAT_YUV420_10BIT
+ case 30: // DRM_FORMAT_VUY101010
+ fallthrough;
+ /*
+ * Used by Mesa and Gstreamer to allocate NV formats and others
+ * as RGB buffers. Technically, XRGB16161616F formats are RGB,
+ * but the dumb buffers are not supposed to be used for anything
+ * beyond 32 bits per pixel.
+ */
+ case 10: // DRM_FORMAT_NV{15,20,30}, DRM_FORMAT_P010
+ case 64: // DRM_FORMAT_{XRGB,XBGR,ARGB,ABGR}16161616F
+ pitch = args->width * DIV_ROUND_UP(args->bpp, SZ_8);
+ break;
+ }
+ }
+
+ if (!pitch || pitch > U32_MAX)
+ return -EINVAL;
+
+ args->pitch = pitch;
+
+ return drm_mode_align_dumb(args, hw_pitch_align, hw_size_align);
+}
+EXPORT_SYMBOL(drm_mode_size_dumb);
+
int drm_mode_create_dumb(struct drm_device *dev,
struct drm_mode_create_dumb *args,
struct drm_file *file_priv)
@@ -99,7 +229,30 @@ int drm_mode_create_dumb(struct drm_device *dev,
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- return drm_mode_create_dumb(dev, data, file_priv);
+ struct drm_mode_create_dumb *args = data;
+ int err;
+
+ err = drm_mode_create_dumb(dev, args, file_priv);
+ if (err) {
+ args->handle = 0;
+ args->pitch = 0;
+ args->size = 0;
+ }
+ return err;
+}
+
+static int drm_mode_mmap_dumb(struct drm_device *dev, struct drm_mode_map_dumb *args,
+ struct drm_file *file_priv)
+{
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+
+ if (dev->driver->dumb_map_offset)
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
+ else
+ return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
}
/**
@@ -120,17 +273,12 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_map_dumb *args = data;
+ int err;
- if (!dev->driver->dumb_create)
- return -ENOSYS;
-
- if (dev->driver->dumb_map_offset)
- return dev->driver->dumb_map_offset(file_priv, dev,
- args->handle,
- &args->offset);
- else
- return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
- &args->offset);
+ err = drm_mode_mmap_dumb(dev, args, file_priv);
+ if (err)
+ args->offset = 0;
+ return err;
}
int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
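Tying the new helper to its driver-side users, a minimal sketch of a dumb_create callback built on drm_mode_size_dumb(); the GEM allocation call at the end is a hypothetical stand-in for whatever allocator the driver uses, and the 8-byte pitch alignment mirrors the DMA and shmem helper conversions later in this series:

/* Hypothetical driver dumb_create callback using drm_mode_size_dumb(). */
static int example_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	int ret;

	/* Validate width/height/bpp and fill in args->pitch and args->size. */
	ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
	if (ret)
		return ret;

	/* Allocate args->size bytes and return a handle in args->handle. */
	return example_gem_create_with_handle(file_priv, dev, args->size,
					      &args->handle);
}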
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 11a5b60cb9ce..53e9dc0543de 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -368,6 +368,10 @@ static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper)
unsigned long flags;
int ret;
+ mutex_lock(&helper->lock);
+ drm_client_modeset_wait_for_vblank(&helper->client, 0);
+ mutex_unlock(&helper->lock);
+
if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty))
return;
@@ -1068,15 +1072,9 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
int ret = 0;
- mutex_lock(&fb_helper->lock);
- if (!drm_master_internal_acquire(dev)) {
- ret = -EBUSY;
- goto unlock;
- }
+ guard(mutex)(&fb_helper->lock);
switch (cmd) {
case FBIO_WAITFORVSYNC:
@@ -1096,28 +1094,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
* make. If we're not smart enough here, one should
* just consider switch the userspace to KMS.
*/
- crtc = fb_helper->client.modesets[0].crtc;
-
- /*
- * Only wait for a vblank event if the CRTC is
- * enabled, otherwise just don't do anythintg,
- * not even report an error.
- */
- ret = drm_crtc_vblank_get(crtc);
- if (!ret) {
- drm_crtc_wait_one_vblank(crtc);
- drm_crtc_vblank_put(crtc);
- }
-
- ret = 0;
+ ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0);
break;
default:
ret = -ENOTTY;
}
- drm_master_internal_release(dev);
-unlock:
- mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 006836554cc2..6cddf05c493b 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -1165,97 +1165,6 @@ void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_
}
EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444);
-/**
- * drm_fb_blit - Copy parts of a framebuffer to display memory
- * @dst: Array of display-memory addresses to copy to
- * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
- * within @dst; can be NULL if scanlines are stored next to each other.
- * @dst_format: FOURCC code of the display's color format
- * @src: The framebuffer memory to copy from
- * @fb: The framebuffer to copy from
- * @clip: Clip rectangle area to copy
- * @state: Transform and conversion state
- *
- * This function copies parts of a framebuffer to display memory. If the
- * formats of the display and the framebuffer mismatch, the blit function
- * will attempt to convert between them during the process. The parameters @dst,
- * @dst_pitch and @src refer to arrays. Each array must have at least as many
- * entries as there are planes in @dst_format's format. Each entry stores the
- * value for the format's respective color plane at the same index.
- *
- * This function does not apply clipping on @dst (i.e. the destination is at the
- * top-left corner).
- *
- * Returns:
- * 0 on success, or
- * -EINVAL if the color-format conversion failed, or
- * a negative error code otherwise.
- */
-int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
- const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state)
-{
- uint32_t fb_format = fb->format->format;
-
- if (fb_format == dst_format) {
- drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
- return 0;
- } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
- return 0;
- } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
- return 0;
- } else if (fb_format == DRM_FORMAT_XRGB8888) {
- if (dst_format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_XRGB1555) {
- drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ARGB1555) {
- drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_RGBA5551) {
- drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_BGR888) {
- drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ARGB8888) {
- drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_XBGR8888) {
- drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ABGR8888) {
- drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_XRGB2101010) {
- drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ARGB2101010) {
- drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_BGRX8888) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_RGB332) {
- drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, state);
- return 0;
- }
- }
-
- drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n",
- &fb_format, &dst_format);
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(drm_fb_blit);
-
static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index adbb73f00d68..18e753ade001 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -1048,7 +1048,7 @@ retry:
plane_state->crtc->base.id,
plane_state->crtc->name, fb->base.id);
- crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
if (ret)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f884d155a832..a1a9c828938b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -101,10 +101,8 @@ drm_gem_init(struct drm_device *dev)
vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
GFP_KERNEL);
- if (!vma_offset_manager) {
- DRM_ERROR("out of memory\n");
+ if (!vma_offset_manager)
return -ENOMEM;
- }
dev->vma_offset_manager = vma_offset_manager;
drm_vma_offset_manager_init(vma_offset_manager,
@@ -785,9 +783,10 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out)
{
- int ret;
- u32 *handles;
+ struct drm_device *dev = filp->minor->dev;
struct drm_gem_object **objs;
+ u32 *handles;
+ int ret;
if (!count)
return 0;
@@ -807,7 +806,7 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
ret = -EFAULT;
- DRM_DEBUG("Failed to copy in GEM handles\n");
+ drm_dbg_core(dev, "Failed to copy in GEM handles\n");
goto out;
}
@@ -855,12 +854,13 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout)
{
- long ret;
+ struct drm_device *dev = filep->minor->dev;
struct drm_gem_object *obj;
+ long ret;
obj = drm_gem_object_lookup(filep, handle);
if (!obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+ drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 4f0320df858f..9c9bfc9e85c6 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -20,6 +20,7 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vma_manager.h>
@@ -304,9 +305,11 @@ int drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct drm_gem_dma_object *dma_obj;
+ int ret;
- args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- args->size = args->pitch * args->height;
+ ret = drm_mode_size_dumb(drm, args, SZ_8, 0);
+ if (ret)
+ return ret;
dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
&args->handle);
@@ -582,7 +585,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
if (ret) {
- DRM_ERROR("Failed to vmap PRIME buffer\n");
+ drm_err(dev, "Failed to vmap PRIME buffer\n");
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5d1349c34afd..dc94a27710e5 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -18,6 +18,7 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
@@ -48,28 +49,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.vm_ops = &drm_gem_shmem_vm_ops,
};
-static struct drm_gem_shmem_object *
-__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
- struct vfsmount *gemfs)
+static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
+ size_t size, bool private, struct vfsmount *gemfs)
{
- struct drm_gem_shmem_object *shmem;
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- size = PAGE_ALIGN(size);
-
- if (dev->driver->gem_create_object) {
- obj = dev->driver->gem_create_object(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
- shmem = to_drm_gem_shmem_obj(obj);
- } else {
- shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!shmem)
- return ERR_PTR(-ENOMEM);
- obj = &shmem->base;
- }
-
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
@@ -81,7 +66,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
}
if (ret) {
drm_gem_private_object_fini(obj);
- goto err_free;
+ return ret;
}
ret = drm_gem_create_mmap_offset(obj);
@@ -102,14 +87,55 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
- return shmem;
-
+ return 0;
err_release:
drm_gem_object_release(obj);
-err_free:
- kfree(obj);
+ return ret;
+}
- return ERR_PTR(ret);
+/**
+ * drm_gem_shmem_init - Initialize an allocated object.
+ * @dev: DRM device
+ * @shmem: The allocated shmem GEM object
+ * @size: Size of the object, in bytes
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure.
+ */
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
+{
+ return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
+
+static struct drm_gem_shmem_object *
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
+ struct vfsmount *gemfs)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ size = PAGE_ALIGN(size);
+
+ if (dev->driver->gem_create_object) {
+ obj = dev->driver->gem_create_object(dev, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+ shmem = to_drm_gem_shmem_obj(obj);
+ } else {
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return ERR_PTR(-ENOMEM);
+ obj = &shmem->base;
+ }
+
+ ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
+ if (ret) {
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ return shmem;
}
/**
* drm_gem_shmem_create - Allocate an object with the given size
@@ -150,13 +176,13 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
/**
- * drm_gem_shmem_free - Free resources associated with a shmem GEM object
- * @shmem: shmem GEM object to free
+ * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
+ * @shmem: shmem GEM object
*
- * This function cleans up the GEM object state and frees the memory used to
- * store the object itself.
+ * This function cleans up the GEM object state, but does not free the memory used to store the
+ * object itself. It is meant as a dedicated helper for the Rust GEM bindings.
*/
-void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
@@ -183,6 +209,19 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
drm_gem_object_release(obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_release);
+
+/**
+ * drm_gem_shmem_free - Free resources associated with a shmem GEM object
+ * @shmem: shmem GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+{
+ drm_gem_shmem_release(shmem);
kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
@@ -518,18 +557,11 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ int ret;
- if (!args->pitch || !args->size) {
- args->pitch = min_pitch;
- args->size = PAGE_ALIGN(args->pitch * args->height);
- } else {
- /* ensure sane minimum values */
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
- if (args->size < args->pitch * args->height)
- args->size = PAGE_ALIGN(args->pitch * args->height);
- }
+ ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
+ if (ret)
+ return ret;
return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index b04cde4a60e7..0bec6f66682b 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -107,7 +107,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
- /* We got here via ttm_bo_put(), which means that the
+ /* We got here via ttm_bo_fini(), which means that the
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
@@ -234,11 +234,11 @@ EXPORT_SYMBOL(drm_gem_vram_create);
* drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
* @gbo: the GEM VRAM object
*
- * See ttm_bo_put() for more information.
+ * See ttm_bo_fini() for more information.
*/
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
- ttm_bo_put(&gbo->bo);
+ ttm_bo_fini(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);
@@ -967,7 +967,7 @@ drm_vram_helper_mode_valid_internal(struct drm_device *dev,
max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbsize = (u32)mode->hdisplay * mode->vdisplay * max_bpp;
fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
if (fbpages > max_fbpages)
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index cb906765897e..73e550c8ff8c 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1363,7 +1363,8 @@ map_pages:
order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
if (is_device_private_page(page) ||
is_device_coherent_page(page)) {
- if (zdd != page->zone_device_data && i > 0) {
+ if (!ctx->allow_mixed &&
+ zdd != page->zone_device_data && i > 0) {
err = -EOPNOTSUPP;
goto err_unmap;
}
@@ -1399,7 +1400,8 @@ map_pages:
} else {
dma_addr_t addr;
- if (is_zone_device_page(page) || pagemap) {
+ if (is_zone_device_page(page) ||
+ (pagemap && !ctx->allow_mixed)) {
err = -EOPNOTSUPP;
goto err_unmap;
}
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index e33c78fc8fbd..b488c91c20a5 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -691,7 +691,7 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation)
{
- size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
+ size_t bufsize = (u32)mode->vdisplay * mode->hdisplay * sizeof(u16);
dbidev->drm.mode_config.preferred_depth = 16;
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 988735560570..a57f6a10ada4 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -203,10 +203,10 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
if (dev->mode_config.poll_enabled)
drm_kms_helper_poll_disable(dev);
- drm_client_dev_suspend(dev, false);
+ drm_client_dev_suspend(dev);
state = drm_atomic_helper_suspend(dev);
if (IS_ERR(state)) {
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
/*
* Don't enable polling if it was never initialized
@@ -252,7 +252,7 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
DRM_ERROR("Failed to resume (%d)\n", ret);
dev->mode_config.suspend_state = NULL;
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
/*
* Don't enable polling if it is not initialized
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 46f59883183d..61e211fd3c9c 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -136,8 +136,17 @@
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
*
- * Drivers for hardware without support for vertical-blanking interrupts
- * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * Drivers for hardware without support for vertical-blanking interrupts can
+ * use DRM vblank timers to send vblank events at the rate of the current
+ * display mode's refresh. While not synchronized to the hardware's
+ * vertical-blanking regions, the timer helps DRM clients and compositors to
+ * adapt their update cycle to the display output. Drivers should set up
+ * vblanking as usual, but call drm_crtc_vblank_start_timer() and
+ * drm_crtc_vblank_cancel_timer() as part of their atomic mode setting.
+ * See also DRM vblank helpers for more information.
+ *
+ * Drivers without support for vertical-blanking interrupts nor timers must
+ * not call drm_vblank_init(). For these drivers, atomic helpers will
* automatically generate fake vblank events as part of the display update.
* This functionality also can be controlled by the driver by enabling and
* disabling struct drm_crtc_state.no_vblank.
@@ -508,6 +517,9 @@ static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
drm_WARN_ON(dev, READ_ONCE(vblank->enabled) &&
drm_core_check_feature(dev, DRIVER_MODESET));
+ if (vblank->vblank_timer.crtc)
+ hrtimer_cancel(&vblank->vblank_timer.timer);
+
drm_vblank_destroy_worker(vblank);
timer_delete_sync(&vblank->disable_timer);
}
@@ -2162,3 +2174,159 @@ err_free:
return ret;
}
+/*
+ * VBLANK timer
+ */
+
+static enum hrtimer_restart drm_vblank_timer_function(struct hrtimer *timer)
+{
+ struct drm_vblank_crtc_timer *vtimer =
+ container_of(timer, struct drm_vblank_crtc_timer, timer);
+ struct drm_crtc *crtc = vtimer->crtc;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+ ktime_t interval;
+ u64 ret_overrun;
+ bool succ;
+
+ spin_lock_irqsave(&vtimer->interval_lock, flags);
+ interval = vtimer->interval;
+ spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+ if (!interval)
+ return HRTIMER_NORESTART;
+
+ ret_overrun = hrtimer_forward_now(&vtimer->timer, interval);
+ if (ret_overrun != 1)
+ drm_dbg_vbl(dev, "vblank timer overrun\n");
+
+ if (crtc_funcs->handle_vblank_timeout)
+ succ = crtc_funcs->handle_vblank_timeout(crtc);
+ else
+ succ = drm_crtc_handle_vblank(crtc);
+ if (!succ)
+ return HRTIMER_NORESTART;
+
+ return HRTIMER_RESTART;
+}
+
+/**
+ * drm_crtc_vblank_start_timer - Starts the vblank timer on the given CRTC
+ * @crtc: the CRTC
+ *
+ * Drivers should call this function from their CRTC's enable_vblank
+ * function to start a vblank timer. The timer will fire after the duration
+ * of a full frame. drm_crtc_vblank_cancel_timer() disables a running timer.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_crtc_vblank_start_timer(struct drm_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+ unsigned long flags;
+
+ if (!vtimer->crtc) {
+ /*
+ * Set up the data structures on the first invocation.
+ */
+ vtimer->crtc = crtc;
+ spin_lock_init(&vtimer->interval_lock);
+ hrtimer_setup(&vtimer->timer, drm_vblank_timer_function,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ } else {
+ /*
+ * Timer should not be active. If it is, wait for the
+ * previous cancel operations to finish.
+ */
+ while (hrtimer_active(&vtimer->timer))
+ hrtimer_try_to_cancel(&vtimer->timer);
+ }
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ spin_lock_irqsave(&vtimer->interval_lock, flags);
+ vtimer->interval = ns_to_ktime(vblank->framedur_ns);
+ spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+ hrtimer_start(&vtimer->timer, vtimer->interval, HRTIMER_MODE_REL);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_start_timer);
+
+/**
+ * drm_crtc_vblank_cancel_timer - Cancels the given CRTC's vblank timer
+ * @crtc: the CRTC
+ *
+ * Drivers should call this function from their CRTC's disable_vblank
+ * function to stop a vblank timer.
+ */
+void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+ unsigned long flags;
+
+ /*
+ * Calling hrtimer_cancel() can result in a deadlock with DRM's
+ * vblank_time_lock and hrtimers' softirq_expiry_lock. So
+ * clear interval and indicate cancellation. The timer function
+ * will cancel itself on the next invocation.
+ */
+
+ spin_lock_irqsave(&vtimer->interval_lock, flags);
+ vtimer->interval = 0;
+ spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+ hrtimer_try_to_cancel(&vtimer->timer);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_cancel_timer);
+
+/**
+ * drm_crtc_vblank_get_vblank_timeout - Returns the vblank timeout
+ * @crtc: The CRTC
+ * @vblank_time: Returns the next vblank timestamp
+ *
+ * The helper drm_crtc_vblank_get_vblank_timeout() returns the next vblank
+ * timestamp of the CRTC's vblank timer according to the timer's expiry
+ * time.
+ */
+void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+ u64 cur_count;
+ ktime_t cur_time;
+
+ if (!READ_ONCE(vblank->enabled)) {
+ *vblank_time = ktime_get();
+ return;
+ }
+
+ /*
+ * A concurrent vblank timeout could update the expires field before
+ * we compare it with the vblank time. We would then compare the old
+ * expiry time against the new vblank time and wrongly deduce that the
+ * timer had already expired. Reread until both fields are consistent.
+ */
+ do {
+ cur_count = drm_crtc_vblank_count_and_time(crtc, &cur_time);
+ *vblank_time = READ_ONCE(vtimer->timer.node.expires);
+ } while (cur_count != drm_crtc_vblank_count_and_time(crtc, &cur_time));
+
+ if (drm_WARN_ON(crtc->dev, !ktime_compare(*vblank_time, cur_time)))
+ return; /* Already expired */
+
+ /*
+ * To prevent races we roll the hrtimer forward before we do any
+ * interrupt processing - this is how real hw works (the interrupt
+ * is only generated after all the vblank registers are updated)
+ * and what the vblank core expects. Therefore we need to always
+ * correct the timestamp by one frame.
+ */
+ *vblank_time = ktime_sub(*vblank_time, vtimer->interval);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_get_vblank_timeout);
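As a usage sketch, the CRTC vblank callbacks of a driver without a hardware vblank interrupt could wrap these three helpers directly, which is essentially what the drm_vblank_helper.c wrappers added below package up; the callback names here are hypothetical:

/* Hypothetical CRTC callbacks backed by the software vblank timer. */
static int example_enable_vblank(struct drm_crtc *crtc)
{
	/* Arm the timer; it fires once per frame of the current mode. */
	return drm_crtc_vblank_start_timer(crtc);
}

static void example_disable_vblank(struct drm_crtc *crtc)
{
	/* Flag the timer for cancellation; it stops on its next expiry. */
	drm_crtc_vblank_cancel_timer(crtc);
}

static bool example_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
					 ktime_t *vblank_time, bool in_vblank_irq)
{
	/* Derive the timestamp from the timer's expiry time. */
	drm_crtc_vblank_get_vblank_timeout(crtc, vblank_time);
	return true;
}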
diff --git a/drivers/gpu/drm/drm_vblank_helper.c b/drivers/gpu/drm/drm_vblank_helper.c
new file mode 100644
index 000000000000..a04a6ba1b0ca
--- /dev/null
+++ b/drivers/gpu/drm/drm_vblank_helper.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The vblank helper library provides functions for supporting vertical
+ * blanking in DRM drivers.
+ *
+ * For vblank timers, several callback implementations are available.
+ * Drivers enable support for vblank timers by setting the vblank callbacks
+ * in struct &drm_crtc_funcs to the helpers provided by this library. The
+ * initializer macro DRM_CRTC_VBLANK_TIMER_FUNCS does this conveniently.
+ * The driver further has to send the VBLANK event from its atomic_flush
+ * callback and control vblank from the CRTC's atomic_enable and atomic_disable
+ * callbacks. The callbacks are located in struct &drm_crtc_helper_funcs.
+ * The vblank helper library provides implementations of these callbacks
+ * for drivers without further requirements. The initializer macro
+ * DRM_CRTC_HELPER_VBLANK_FUNCS sets them conveniently.
+ *
+ * Once the driver enables vblank support with drm_vblank_init(), each
+ * CRTC's vblank timer fires according to the programmed display mode. By
+ * default, the vblank timer invokes drm_crtc_handle_vblank(). Drivers with
+ * more specific requirements can set their own handler function in
+ * struct &drm_crtc_helper_funcs.handle_vblank_timeout.
+ */
+
+/*
+ * VBLANK helpers
+ */
+
+/**
+ * drm_crtc_vblank_atomic_flush -
+ * Implements struct &drm_crtc_helper_funcs.atomic_flush
+ * @crtc: The CRTC
+ * @state: The atomic state to apply
+ *
+ * The helper drm_crtc_vblank_atomic_flush() implements atomic_flush of
+ * struct drm_crtc_helper_funcs for CRTCs that only need to send out a
+ * VBLANK event.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_flush.
+ */
+void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_pending_vblank_event *event;
+
+ spin_lock_irq(&dev->event_lock);
+
+ event = crtc_state->event;
+ crtc_state->event = NULL;
+
+ if (event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+
+ spin_unlock_irq(&dev->event_lock);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_flush);
+
+/**
+ * drm_crtc_vblank_atomic_enable - Implements struct &drm_crtc_helper_funcs.atomic_enable
+ * @crtc: The CRTC
+ * @state: The atomic state
+ *
+ * The helper drm_crtc_vblank_atomic_enable() implements atomic_enable
+ * of struct drm_crtc_helper_funcs for CRTCs that only need to enable VBLANKs.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_enable.
+ */
+void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_enable);
+
+/**
+ * drm_crtc_vblank_atomic_disable - Implements struct &drm_crtc_helper_funcs.atomic_disable
+ * @crtc: The CRTC
+ * @state: The atomic state
+ *
+ * The helper drm_crtc_vblank_atomic_disable() implements atomic_disable
+ * of struct drm_crtc_helper_funcs for CRTCs that only need to disable VBLANKs.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_disable.
+ */
+void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_off(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_disable);
+
+/*
+ * VBLANK timer
+ */
+
+/**
+ * drm_crtc_vblank_helper_enable_vblank_timer - Implements struct &drm_crtc_funcs.enable_vblank
+ * @crtc: The CRTC
+ *
+ * The helper drm_crtc_vblank_helper_enable_vblank_timer() implements
+ * enable_vblank of struct drm_crtc_helper_funcs for CRTCs that require
+ * a VBLANK timer. It sets up the timer on the first invocation. The
+ * started timer expires after the current frame duration. See struct
+ * &drm_vblank_crtc.framedur_ns.
+ *
+ * See also struct &drm_crtc_helper_funcs.enable_vblank.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc)
+{
+ return drm_crtc_vblank_start_timer(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_enable_vblank_timer);
+
+/**
+ * drm_crtc_vblank_helper_disable_vblank_timer - Implements struct &drm_crtc_funcs.disable_vblank
+ * @crtc: The CRTC
+ *
+ * The helper drm_crtc_vblank_helper_disable_vblank_timer() implements
+ * disable_vblank of struct drm_crtc_funcs for CRTCs that require a
+ * VBLANK timer.
+ *
+ * See also struct &drm_crtc_helper_funcs.disable_vblank.
+ */
+void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc)
+{
+ drm_crtc_vblank_cancel_timer(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_disable_vblank_timer);
+
+/**
+ * drm_crtc_vblank_helper_get_vblank_timestamp_from_timer -
+ * Implements struct &drm_crtc_funcs.get_vblank_timestamp
+ * @crtc: The CRTC
+ * @max_error: Maximum acceptable error
+ * @vblank_time: Returns the next vblank timestamp
+ * @in_vblank_irq: True if called from drm_crtc_handle_vblank()
+ *
+ * The helper drm_crtc_vblank_helper_get_vblank_timestamp_from_timer() implements
+ * get_vblank_timestamp of struct drm_crtc_funcs for CRTCs that require a
+ * VBLANK timer. It returns the timestamp according to the timer's expiry
+ * time.
+ *
+ * See also struct &drm_crtc_funcs.get_vblank_timestamp.
+ *
+ * Returns:
+ * True on success, or false otherwise.
+ */
+bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ drm_crtc_vblank_get_vblank_timeout(crtc, vblank_time);
+
+ return true;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_from_timer);
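Putting the pieces together, a minimal sketch of the two CRTC vtables for a driver that relies entirely on vblank timers; the structure names are hypothetical, and the hyperv_drm conversion later in this series is the in-tree example of the same wiring:

/* Hypothetical CRTC vtables wired up with the vblank timer helpers. */
static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.atomic_check = drm_crtc_helper_atomic_check,
	/* The three vblank helpers below can also be set via DRM_CRTC_HELPER_VBLANK_FUNCS. */
	.atomic_flush = drm_crtc_vblank_atomic_flush,     /* sends the VBLANK event */
	.atomic_enable = drm_crtc_vblank_atomic_enable,   /* drm_crtc_vblank_on() */
	.atomic_disable = drm_crtc_vblank_atomic_disable, /* drm_crtc_vblank_off() */
};

static const struct drm_crtc_funcs example_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	DRM_CRTC_VBLANK_TIMER_FUNCS, /* enable/disable vblank + timestamp from timer */
};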
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index e3fbb45f37a2..02714c9ab639 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -10,6 +10,7 @@
#include <linux/shmem_fs.h>
#include <linux/module.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>
@@ -329,15 +330,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
unsigned int flags;
int ret;
+ ret = drm_mode_size_dumb(dev, args, 0, 0);
+ if (ret)
+ return ret;
+
/*
* allocate memory to be used for framebuffer.
* - this callback would be called by user application
* with DRM_IOCTL_MODE_CREATE_DUMB command.
*/
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
if (is_drm_iommu_supported(dev))
flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
else
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 7c3aa77186d3..6400070a4c9b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -58,7 +58,7 @@ static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
struct drm_plane_state *state = &exynos_state->base;
struct drm_crtc *crtc = state->crtc;
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(state->state, crtc);
+ drm_atomic_get_new_crtc_state(state->state, crtc);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 32d31e5f5f1a..a6af21514cff 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -50,48 +50,6 @@ static const struct vm_operations_struct psb_fbdev_vm_ops = {
* struct fb_ops
*/
-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-
-static int psb_fbdev_fb_setcolreg(unsigned int regno,
- unsigned int red, unsigned int green,
- unsigned int blue, unsigned int transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- uint32_t v;
-
- if (!fb)
- return -ENOMEM;
-
- if (regno > 255)
- return 1;
-
- red = CMAP_TOHW(red, info->var.red.length);
- blue = CMAP_TOHW(blue, info->var.blue.length);
- green = CMAP_TOHW(green, info->var.green.length);
- transp = CMAP_TOHW(transp, info->var.transp.length);
-
- v = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset) |
- (transp << info->var.transp.offset);
-
- if (regno < 16) {
- switch (fb->format->cpp[0] * 8) {
- case 16:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- case 24:
- case 32:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- }
- }
-
- return 0;
-}
-
static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
if (vma->vm_pgoff != 0)
@@ -135,7 +93,6 @@ static const struct fb_ops psb_fbdev_fb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_IOMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
- .fb_setcolreg = psb_fbdev_fb_setcolreg,
__FB_DEFAULT_IOMEM_OPS_DRAW,
.fb_mmap = psb_fbdev_fb_mmap,
.fb_destroy = psb_fbdev_fb_destroy,
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index 4a15695fa933..1726a3fadff8 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -561,11 +561,11 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn
continue; /* not a DRM property */
property = gud_connector_property_lookup(connector, prop);
- if (WARN_ON(IS_ERR(property)))
+ if (drm_WARN_ON(drm, IS_ERR(property)))
continue;
state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state);
- if (WARN_ON(IS_ERR(state_val)))
+ if (drm_WARN_ON(drm, IS_ERR(state_val)))
continue;
*state_val = val;
@@ -593,7 +593,7 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state,
unsigned int *state_val;
state_val = gud_connector_tv_state_val(prop, &connector_state->tv);
- if (WARN_ON_ONCE(IS_ERR(state_val)))
+ if (drm_WARN_ON_ONCE(connector_state->connector->dev, IS_ERR(state_val)))
return PTR_ERR(state_val);
val = *state_val;
@@ -667,7 +667,7 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
return ret;
}
- if (WARN_ON(connector->index != index))
+ if (drm_WARN_ON(drm, connector->index != index))
return -EINVAL;
if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS)
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 54d9aa9998e5..76d77a736d84 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -61,7 +61,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
size_t len;
void *buf;
- WARN_ON_ONCE(format->char_per_block[0] != 1);
+ drm_WARN_ON_ONCE(fb->dev, format->char_per_block[0] != 1);
/* Start on a byte boundary */
rect->x1 = ALIGN_DOWN(rect->x1, block_width);
@@ -69,7 +69,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
height = drm_rect_height(rect);
len = drm_format_info_min_pitch(format, 0, width) * height;
- buf = kmalloc(width * height, GFP_KERNEL);
+ buf = kmalloc_array(height, width, GFP_KERNEL);
if (!buf)
return 0;
@@ -138,7 +138,7 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
break;
default:
- WARN_ON_ONCE(1);
+ drm_WARN_ON_ONCE(fb->dev, 1);
return len;
}
@@ -527,7 +527,7 @@ int gud_plane_atomic_check(struct drm_plane *plane,
drm_connector_list_iter_end(&conn_iter);
}
- if (WARN_ON_ONCE(!connector_state))
+ if (drm_WARN_ON_ONCE(plane->dev, !connector_state))
return -ENOENT;
len = struct_size(req, properties,
@@ -539,7 +539,7 @@ int gud_plane_atomic_check(struct drm_plane *plane,
gud_from_display_mode(&req->mode, mode);
req->format = gud_from_fourcc(format->format);
- if (WARN_ON_ONCE(!req->format)) {
+ if (drm_WARN_ON_ONCE(plane->dev, !req->format)) {
ret = -EINVAL;
goto out;
}
@@ -561,7 +561,7 @@ int gud_plane_atomic_check(struct drm_plane *plane,
val = new_plane_state->rotation;
break;
default:
- WARN_ON_ONCE(1);
+ drm_WARN_ON_ONCE(plane->dev, 1);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 945b9482bcb3..6e6eb1c12a68 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -19,6 +19,8 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "hyperv_drm.h"
@@ -111,11 +113,15 @@ static void hyperv_crtc_helper_atomic_enable(struct drm_crtc *crtc,
crtc_state->mode.hdisplay,
crtc_state->mode.vdisplay,
plane_state->fb->pitches[0]);
+
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = {
.atomic_check = drm_crtc_helper_atomic_check,
+ .atomic_flush = drm_crtc_vblank_atomic_flush,
.atomic_enable = hyperv_crtc_helper_atomic_enable,
+ .atomic_disable = drm_crtc_vblank_atomic_disable,
};
static const struct drm_crtc_funcs hyperv_crtc_funcs = {
@@ -125,6 +131,7 @@ static const struct drm_crtc_funcs hyperv_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static int hyperv_plane_atomic_check(struct drm_plane *plane,
@@ -321,6 +328,10 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv)
return ret;
}
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(dev);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 1f4814968868..57bb111d65da 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1029,7 +1029,7 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!obj->ttm.created);
- ttm_bo_put(i915_gem_to_ttm(obj));
+ ttm_bo_fini(i915_gem_to_ttm(obj));
}
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -1325,7 +1325,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
- * Similarly, in delayed_destroy, we can't call ttm_bo_put()
+ * Similarly, in delayed_destroy, we can't call ttm_bo_fini()
* until successful initialization.
*/
ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index a28c3710c4d5..89be8da79d3b 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -978,7 +978,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable(&i915->runtime_pm);
intel_power_domains_disable(display);
- drm_client_dev_suspend(&i915->drm, false);
+ drm_client_dev_suspend(&i915->drm);
if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&i915->drm);
intel_display_driver_disable_user_access(display);
@@ -1061,7 +1061,7 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_power_domains_disable(display);
- drm_client_dev_suspend(dev, false);
+ drm_client_dev_suspend(dev);
if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(dev);
intel_display_driver_disable_user_access(display);
@@ -1245,7 +1245,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_resume(display);
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
intel_power_domains_enable(display);
diff --git a/drivers/gpu/drm/imx/dc/dc-ed.c b/drivers/gpu/drm/imx/dc/dc-ed.c
index 86ecc22d0a55..d42f33d6f3fc 100644
--- a/drivers/gpu/drm/imx/dc/dc-ed.c
+++ b/drivers/gpu/drm/imx/dc/dc-ed.c
@@ -15,12 +15,12 @@
#include "dc-pe.h"
#define PIXENGCFG_STATIC 0x8
-#define POWERDOWN BIT(4)
-#define SYNC_MODE BIT(8)
-#define SINGLE 0
#define DIV_MASK GENMASK(23, 16)
#define DIV(x) FIELD_PREP(DIV_MASK, (x))
#define DIV_RESET 0x80
+#define SYNC_MODE BIT(8)
+#define SINGLE 0
+#define POWERDOWN BIT(4)
#define PIXENGCFG_DYNAMIC 0xc
@@ -28,9 +28,9 @@
#define SYNC_TRIGGER BIT(0)
#define STATICCONTROL 0x8
+#define PERFCOUNTMODE BIT(12)
#define KICK_MODE BIT(8)
#define EXTERNAL BIT(8)
-#define PERFCOUNTMODE BIT(12)
#define CONTROL 0xc
#define GAMMAAPPLYENABLE BIT(0)
diff --git a/drivers/gpu/drm/imx/dc/dc-fg.c b/drivers/gpu/drm/imx/dc/dc-fg.c
index 7f6c1852bf72..28f372be9247 100644
--- a/drivers/gpu/drm/imx/dc/dc-fg.c
+++ b/drivers/gpu/drm/imx/dc/dc-fg.c
@@ -56,9 +56,9 @@
#define FGINCTRL 0x5c
#define FGINCTRLPANIC 0x60
-#define FGDM_MASK GENMASK(2, 0)
-#define ENPRIMALPHA BIT(3)
#define ENSECALPHA BIT(4)
+#define ENPRIMALPHA BIT(3)
+#define FGDM_MASK GENMASK(2, 0)
#define FGCCR 0x64
#define CCGREEN(x) FIELD_PREP(GENMASK(19, 10), (x))
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.c b/drivers/gpu/drm/imx/dc/dc-fu.c
index f94c591c8158..1d8f74babef8 100644
--- a/drivers/gpu/drm/imx/dc/dc-fu.c
+++ b/drivers/gpu/drm/imx/dc/dc-fu.c
@@ -18,11 +18,11 @@
#define BASEADDRESSAUTOUPDATE(x) FIELD_PREP(BASEADDRESSAUTOUPDATE_MASK, (x))
/* BURSTBUFFERMANAGEMENT */
+#define LINEMODE_MASK BIT(31)
#define SETBURSTLENGTH_MASK GENMASK(12, 8)
#define SETBURSTLENGTH(x) FIELD_PREP(SETBURSTLENGTH_MASK, (x))
#define SETNUMBUFFERS_MASK GENMASK(7, 0)
#define SETNUMBUFFERS(x) FIELD_PREP(SETNUMBUFFERS_MASK, (x))
-#define LINEMODE_MASK BIT(31)
/* SOURCEBUFFERATTRIBUTES */
#define BITSPERPIXEL_MASK GENMASK(21, 16)
@@ -31,20 +31,20 @@
#define STRIDE(x) FIELD_PREP(STRIDE_MASK, (x) - 1)
/* SOURCEBUFFERDIMENSION */
-#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
#define LINECOUNT(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
/* LAYEROFFSET */
-#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
#define LAYERYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
/* CLIPWINDOWOFFSET */
-#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
#define CLIPWINDOWYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
/* CLIPWINDOWDIMENSIONS */
-#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1)
#define CLIPWINDOWHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x) - 1)
+#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1)
enum dc_linemode {
/*
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.h b/drivers/gpu/drm/imx/dc/dc-fu.h
index e016e1ea5b4e..f678de3ca8c0 100644
--- a/drivers/gpu/drm/imx/dc/dc-fu.h
+++ b/drivers/gpu/drm/imx/dc/dc-fu.h
@@ -33,13 +33,13 @@
#define A_SHIFT(x) FIELD_PREP_CONST(GENMASK(4, 0), (x))
/* LAYERPROPERTY */
+#define SOURCEBUFFERENABLE BIT(31)
#define YUVCONVERSIONMODE_MASK GENMASK(18, 17)
#define YUVCONVERSIONMODE(x) FIELD_PREP(YUVCONVERSIONMODE_MASK, (x))
-#define SOURCEBUFFERENABLE BIT(31)
/* FRAMEDIMENSIONS */
-#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
#define FRAMEHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
/* CONTROL */
#define INPUTSELECT_MASK GENMASK(4, 3)
diff --git a/drivers/gpu/drm/imx/dc/dc-lb.c b/drivers/gpu/drm/imx/dc/dc-lb.c
index 38f966625d38..ca1d714c8d6e 100644
--- a/drivers/gpu/drm/imx/dc/dc-lb.c
+++ b/drivers/gpu/drm/imx/dc/dc-lb.c
@@ -17,12 +17,12 @@
#include "dc-pe.h"
#define PIXENGCFG_DYNAMIC 0x8
-#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0)
-#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \
- FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x))
#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK GENMASK(13, 8)
#define PIXENGCFG_DYNAMIC_SEC_SEL(x) \
FIELD_PREP(PIXENGCFG_DYNAMIC_SEC_SEL_MASK, (x))
+#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0)
+#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x))
#define STATICCONTROL 0x8
#define SHDTOKSEL_MASK GENMASK(4, 3)
@@ -37,24 +37,24 @@
#define BLENDCONTROL 0x10
#define ALPHA_MASK GENMASK(23, 16)
#define ALPHA(x) FIELD_PREP(ALPHA_MASK, (x))
-#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0)
-#define PRIM_C_BLD_FUNC(x) \
- FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x))
-#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4)
-#define SEC_C_BLD_FUNC(x) \
- FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x))
-#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8)
-#define PRIM_A_BLD_FUNC(x) \
- FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x))
#define SEC_A_BLD_FUNC_MASK GENMASK(14, 12)
#define SEC_A_BLD_FUNC(x) \
FIELD_PREP(SEC_A_BLD_FUNC_MASK, (x))
+#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8)
+#define PRIM_A_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x))
+#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4)
+#define SEC_C_BLD_FUNC(x) \
+ FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x))
+#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0)
+#define PRIM_C_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x))
#define POSITION 0x14
-#define XPOS_MASK GENMASK(15, 0)
-#define XPOS(x) FIELD_PREP(XPOS_MASK, (x))
#define YPOS_MASK GENMASK(31, 16)
#define YPOS(x) FIELD_PREP(YPOS_MASK, (x))
+#define XPOS_MASK GENMASK(15, 0)
+#define XPOS(x) FIELD_PREP(XPOS_MASK, (x))
enum dc_lb_blend_func {
DC_LAYERBLEND_BLEND_ZERO,
diff --git a/drivers/gpu/drm/imx/dc/dc-plane.c b/drivers/gpu/drm/imx/dc/dc-plane.c
index d8b946fb90de..e40d5d66c5c1 100644
--- a/drivers/gpu/drm/imx/dc/dc-plane.c
+++ b/drivers/gpu/drm/imx/dc/dc-plane.c
@@ -106,7 +106,7 @@ dc_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
}
crtc_state =
- drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+ drm_atomic_get_new_crtc_state(state, plane_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
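
The drm_atomic_get_existing_crtc_state() -> drm_atomic_get_new_crtc_state() conversions that recur through this series are behaviour-neutral in atomic_check: the deprecated accessor returns whichever CRTC state is currently tracked in the drm_atomic_state, which at check time is the new one, while the replacement names that intent explicitly. A short annotation, not part of the patch:

/* In atomic_check both calls return the same, to-be-committed state:
 *
 *   old (deprecated): crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
 *   new:              crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
 *
 * Either returns NULL if the CRTC was never added to the atomic state,
 * which is why the WARN_ON(!crtc_state) checks are kept as-is.
 */
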
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index ab6d32bad756..3a063a53c8df 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -159,8 +159,8 @@ static int dcss_plane_atomic_check(struct drm_plane *plane,
dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
WARN_ON(!dma_obj);
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
hdisplay = crtc_state->adjusted_mode.hdisplay;
vdisplay = crtc_state->adjusted_mode.vdisplay;
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
index ec5fd9a01f1e..465b5a6ad5bb 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
@@ -17,7 +17,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
@@ -141,17 +143,32 @@ static int imx_drm_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- u32 width = args->width;
+ u32 fourcc;
+ const struct drm_format_info *info;
+ u64 pitch_align;
int ret;
- args->width = ALIGN(width, 8);
-
- ret = drm_gem_dma_dumb_create(file_priv, drm, args);
+ /*
+ * Hardware requires the framebuffer width to be aligned to
+ * multiples of 8. The mode-setting code handles this, but
+ * the buffer pitch has to be aligned as well. Set the pitch
+ * alignment accordingly, so that each scanline fits into
+ * the allocated buffer.
+ */
+ fourcc = drm_driver_color_mode_format(drm, args->bpp);
+ if (fourcc == DRM_FORMAT_INVALID)
+ return -EINVAL;
+ info = drm_format_info(fourcc);
+ if (!info)
+ return -EINVAL;
+ pitch_align = drm_format_info_min_pitch(info, 0, SZ_8);
+ if (!pitch_align || pitch_align > U32_MAX)
+ return -EINVAL;
+ ret = drm_mode_size_dumb(drm, args, pitch_align, 0);
if (ret)
return ret;
- args->width = width;
- return ret;
+ return drm_gem_dma_dumb_create(file_priv, drm, args);
}
static const struct drm_driver imx_drm_driver = {
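
This imx conversion is the template for the other dumb-buffer changes in the series: the hardware's pixel alignment is translated into a byte pitch alignment with drm_format_info_min_pitch(), and drm_mode_size_dumb() then fills in args->pitch and args->size (with overflow checking) before the GEM object is allocated. Worked numbers, purely illustrative and not taken from the patch:

/*
 * With args->bpp == 32 the color mode resolves to XRGB8888, i.e.
 * 4 bytes per pixel, so the 8-pixel requirement becomes
 *     drm_format_info_min_pitch(info, 0, 8) == 8 * 4 == 32 bytes.
 * A 639-pixel-wide buffer then has its pitch rounded up by
 * drm_mode_size_dumb() from 639 * 4 == 2556 bytes to 2560 bytes,
 * and args->size is derived from that pitch and args->height.
 */
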
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
index c5629e155d25..63f23b821b0b 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
@@ -368,17 +368,20 @@ static unsigned long clk_tve_di_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long clk_tve_di_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_tve_di_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- div = *prate / rate;
+ div = req->best_parent_rate / req->rate;
if (div >= 4)
- return *prate / 4;
+ req->rate = req->best_parent_rate / 4;
else if (div >= 2)
- return *prate / 2;
- return *prate;
+ req->rate = req->best_parent_rate / 2;
+ else
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -409,7 +412,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
}
static const struct clk_ops clk_tve_di_ops = {
- .round_rate = clk_tve_di_round_rate,
+ .determine_rate = clk_tve_di_determine_rate,
.set_rate = clk_tve_di_set_rate,
.recalc_rate = clk_tve_di_recalc_rate,
};
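
The .round_rate -> .determine_rate conversions here and in mcde below keep the divider policy unchanged; only the calling convention differs: the callback now writes the achievable rate into req->rate (req->best_parent_rate replaces the *prate argument) and returns 0 or a negative error instead of returning the rate. Worked numbers for the TVE divider above, as an illustration only:

/*
 * parent = 216 MHz, requested  50 MHz -> div = 4 -> req->rate =  54 MHz
 * parent = 216 MHz, requested 100 MHz -> div = 2 -> req->rate = 108 MHz
 * parent = 216 MHz, requested 300 MHz -> div = 0 -> req->rate = 216 MHz
 */
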
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
index 704c549750f9..df19560e41b4 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
@@ -386,8 +386,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
crtc_state =
- drm_atomic_get_existing_crtc_state(state,
- new_state->crtc);
+ drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 6d8325c76697..dfdeb926fe9c 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -134,10 +134,10 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct drm_display_info *di = &conn_state->connector->display_info;
struct drm_bridge_state *next_bridge_state = NULL;
- struct drm_bridge *next_bridge;
u32 bus_flags, bus_fmt;
- next_bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
+
if (next_bridge)
next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
next_bridge);
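
The __free(drm_bridge_put) annotation above relies on the kernel's scope-based cleanup support: the reference returned by drm_bridge_get_next_bridge() is dropped automatically when next_bridge leaves scope, on every return path. A minimal, generic sketch of the pattern (using the stock kfree cleanup rather than the bridge helpers):

#include <linux/cleanup.h>
#include <linux/slab.h>

/* buf is freed when it goes out of scope, including on early returns. */
static int example_scoped_cleanup(size_t len)
{
	void *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	if (len < 16)
		return -EINVAL;	/* buf is freed here, too */

	/* ... use buf ... */
	return 0;
}
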
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 9db1ceaed518..d3213fbf22be 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -247,8 +247,8 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct ingenic_drm_private_state *priv_state;
unsigned int next_id;
- priv_state = ingenic_drm_get_priv_state(priv, state);
- if (WARN_ON(IS_ERR(priv_state)))
+ priv_state = ingenic_drm_get_new_priv_state(priv, state);
+ if (WARN_ON(!priv_state))
return;
/* Set addresses of our DMA descriptor chains */
@@ -340,6 +340,7 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
crtc);
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL;
+ struct ingenic_drm_private_state *priv_state;
if (crtc_state->gamma_lut &&
drm_color_lut_size(crtc_state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
@@ -347,6 +348,11 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ /* We will need the state in atomic_enable, so let's make sure it's part of the state */
+ priv_state = ingenic_drm_get_priv_state(priv, state);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
if (drm_atomic_crtc_needs_modeset(crtc_state) && priv->soc_info->has_osd) {
f1_state = drm_atomic_get_plane_state(crtc_state->state,
&priv->f1);
@@ -471,8 +477,7 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
if (priv->soc_info->plane_f0_not_working && plane == &priv->f0)
return -EINVAL;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index 26ebf424d63e..32638a713241 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -580,7 +580,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -705,7 +705,7 @@ ingenic_ipu_plane_atomic_set_property(struct drm_plane *plane,
ipu->sharpness = val;
if (state->crtc) {
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 9e0562aa2bcb..9562fe6711ff 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -129,8 +129,7 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
}
can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
crtc_state =
- drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
DRM_PLANE_NO_SCALING,
diff --git a/drivers/gpu/drm/logicvc/logicvc_layer.c b/drivers/gpu/drm/logicvc/logicvc_layer.c
index 464000aea765..eab4d773f92b 100644
--- a/drivers/gpu/drm/logicvc/logicvc_layer.c
+++ b/drivers/gpu/drm/logicvc/logicvc_layer.c
@@ -96,8 +96,8 @@ static int logicvc_plane_atomic_check(struct drm_plane *drm_plane,
if (!new_state->crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(new_state->state,
- new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c
index a720d8f53209..c29dd730a894 100644
--- a/drivers/gpu/drm/loongson/lsdc_gem.c
+++ b/drivers/gpu/drm/loongson/lsdc_gem.c
@@ -6,6 +6,7 @@
#include <linux/dma-buf.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
@@ -57,7 +58,7 @@ static void lsdc_gem_object_free(struct drm_gem_object *obj)
struct ttm_buffer_object *tbo = to_ttm_bo(obj);
if (tbo)
- ttm_bo_put(tbo);
+ ttm_bo_fini(tbo);
}
static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
@@ -204,45 +205,31 @@ int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
const struct lsdc_desc *descp = ldev->descp;
u32 domain = LSDC_GEM_DOMAIN_VRAM;
struct drm_gem_object *gobj;
- size_t size;
- u32 pitch;
- u32 handle;
int ret;
- if (!args->width || !args->height)
- return -EINVAL;
-
- if (args->bpp != 32 && args->bpp != 16)
- return -EINVAL;
-
- pitch = args->width * args->bpp / 8;
- pitch = ALIGN(pitch, descp->pitch_align);
- size = pitch * args->height;
- size = ALIGN(size, PAGE_SIZE);
+ ret = drm_mode_size_dumb(ddev, args, descp->pitch_align, 0);
+ if (ret)
+ return ret;
/* Maximum single bo size allowed is the half vram size available */
- if (size > ldev->vram_size / 2) {
- drm_err(ddev, "Requesting(%zuMiB) failed\n", size >> 20);
+ if (args->size > ldev->vram_size / 2) {
+ drm_err(ddev, "Requesting(%zuMiB) failed\n", (size_t)(args->size >> 20));
return -ENOMEM;
}
- gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL);
+ gobj = lsdc_gem_object_create(ddev, domain, args->size, false, NULL, NULL);
if (IS_ERR(gobj)) {
drm_err(ddev, "Failed to create gem object\n");
return PTR_ERR(gobj);
}
- ret = drm_gem_handle_create(file, gobj, &handle);
+ ret = drm_gem_handle_create(file, gobj, &args->handle);
/* drop reference from allocate, handle holds it now */
drm_gem_object_put(gobj);
if (ret)
return ret;
- args->pitch = pitch;
- args->size = size;
- args->handle = handle;
-
return 0;
}
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
index aa9a97f9c4dc..2967a5cca069 100644
--- a/drivers/gpu/drm/loongson/lsdc_plane.c
+++ b/drivers/gpu/drm/loongson/lsdc_plane.c
@@ -196,7 +196,7 @@ static int lsdc_cursor_plane_atomic_async_check(struct drm_plane *plane,
return -EINVAL;
}
- crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (!crtc_state->active)
return -EINVAL;
diff --git a/drivers/gpu/drm/mcde/mcde_clk_div.c b/drivers/gpu/drm/mcde/mcde_clk_div.c
index 3056ac566473..8c5af2677357 100644
--- a/drivers/gpu/drm/mcde/mcde_clk_div.c
+++ b/drivers/gpu/drm/mcde/mcde_clk_div.c
@@ -71,12 +71,15 @@ static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
return best_div;
}
-static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int mcde_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int div = mcde_clk_div_choose_div(hw, rate, prate, true);
+ int div = mcde_clk_div_choose_div(hw, req->rate,
+ &req->best_parent_rate, true);
- return DIV_ROUND_UP_ULL(*prate, div);
+ req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);
+
+ return 0;
}
static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw,
@@ -132,7 +135,7 @@ static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops mcde_clk_div_ops = {
.enable = mcde_clk_div_enable,
.recalc_rate = mcde_clk_div_recalc_rate,
- .round_rate = mcde_clk_div_round_rate,
+ .determine_rate = mcde_clk_div_determine_rate,
.set_rate = mcde_clk_div_set_rate,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 02349bd44001..1b5667ddbb03 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -122,7 +122,8 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (ret)
return ret;
- crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
DRM_PLANE_NO_SCALING,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 7c790406d533..4ca183fb61a9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -336,8 +336,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -373,8 +372,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
int min_scale, max_scale;
int ret;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 07d8cdd6bb2e..688705a871cf 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -10,8 +10,10 @@
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <trace/events/gpu_mem.h>
@@ -698,8 +700,29 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- args->pitch = align_pitch(args->width, args->bpp);
- args->size = PAGE_ALIGN(args->pitch * args->height);
+ u32 fourcc;
+ const struct drm_format_info *info;
+ u64 pitch_align;
+ int ret;
+
+ /*
+ * Adreno needs pitch aligned to 32 pixels. Compute the number
+ * of bytes for a block of 32 pixels at the given color format.
+ * Use the result as pitch alignment.
+ */
+ fourcc = drm_driver_color_mode_format(dev, args->bpp);
+ if (fourcc == DRM_FORMAT_INVALID)
+ return -EINVAL;
+ info = drm_format_info(fourcc);
+ if (!info)
+ return -EINVAL;
+ pitch_align = drm_format_info_min_pitch(info, 0, SZ_32);
+ if (!pitch_align || pitch_align > U32_MAX)
+ return -EINVAL;
+ ret = drm_mode_size_dumb(dev, args, pitch_align, 0);
+ if (ret)
+ return ret;
+
return msm_gem_new_handle(dev, file, args->size,
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index c88776d1e784..3b5757aed9c8 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -28,6 +28,7 @@ config DRM_NOUVEAU
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
select SND_HDA_COMPONENT if SND_HDA_CORE
+ select PM_DEVFREQ if ARCH_TEGRA
help
Choose this option for open-source NVIDIA support.
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 22f74fc88cd7..57bc542780bb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -9,6 +9,8 @@ struct nvkm_device_tegra {
struct nvkm_device device;
struct platform_device *pdev;
+ void __iomem *regs;
+
struct reset_control *rst;
struct clk *clk;
struct clk *clk_ref;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index d5d8877064a7..6a09d397c651 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -134,4 +134,5 @@ int gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gp10b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index d59fd12268b9..6c26beeb427f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -57,7 +57,7 @@ nouveau_bo(struct ttm_buffer_object *bo)
static inline void
nouveau_bo_fini(struct nouveau_bo *bo)
{
- ttm_bo_put(&bo->bo);
+ ttm_bo_fini(&bo->bo);
}
extern struct ttm_device_funcs nouveau_bo_driver;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 805d0a87aa54..00515623a2cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
@@ -764,7 +765,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
- drm_client_dev_suspend(dev, false);
+ drm_client_dev_suspend(dev);
if (drm_drv_uses_atomic_modeset(dev)) {
if (!runtime) {
@@ -795,7 +796,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
}
}
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
}
int
@@ -807,9 +808,9 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
uint32_t domain;
int ret;
- args->pitch = roundup(args->width * (args->bpp / 8), 256);
- args->size = args->pitch * args->height;
- args->size = roundup(args->size, PAGE_SIZE);
+ ret = drm_mode_size_dumb(dev, args, SZ_256, 0);
+ if (ret)
+ return ret;
/* Use VRAM if there is any ; otherwise fallback to system memory */
if (nouveau_drm(dev)->client.device.info.ram_size != 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 690e10fbf0bd..395d92ab6271 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -87,7 +87,7 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
}
- ttm_bo_put(&nvbo->bo);
+ ttm_bo_fini(&nvbo->bo);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 8d5853deeee4..9fd351273236 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -21,6 +21,8 @@
*/
#include "nouveau_platform.h"
+#include <nvkm/subdev/clk/gk20a_devfreq.h>
+
static int nouveau_platform_probe(struct platform_device *pdev)
{
const struct nvkm_device_tegra_func *func;
@@ -40,6 +42,21 @@ static void nouveau_platform_remove(struct platform_device *pdev)
nouveau_drm_device_remove(drm);
}
+#ifdef CONFIG_PM_SLEEP
+static int nouveau_platform_suspend(struct device *dev)
+{
+ return gk20a_devfreq_suspend(dev);
+}
+
+static int nouveau_platform_resume(struct device *dev)
+{
+ return gk20a_devfreq_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(nouveau_pm_ops, nouveau_platform_suspend,
+ nouveau_platform_resume);
+#endif
+
#if IS_ENABLED(CONFIG_OF)
static const struct nvkm_device_tegra_func gk20a_platform_data = {
.iommu_bit = 34,
@@ -81,6 +98,9 @@ struct platform_driver nouveau_platform_driver = {
.driver = {
.name = "nouveau",
.of_match_table = of_match_ptr(nouveau_platform_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &nouveau_pm_ops,
+#endif
},
.probe = nouveau_platform_probe,
.remove = nouveau_platform_remove,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 3375a59ebf1a..2517b65d8faa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2280,6 +2280,7 @@ nv13b_chipset = {
.acr = { 0x00000001, gp10b_acr_new },
.bar = { 0x00000001, gm20b_bar_new },
.bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gp10b_clk_new },
.fault = { 0x00000001, gp10b_fault_new },
.fb = { 0x00000001, gp10b_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 114e50ca1827..03aa6f09ec89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -259,6 +259,10 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->func = func;
tdev->pdev = pdev;
+ tdev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tdev->regs))
+ return PTR_ERR(tdev->regs);
+
if (func->require_vdd) {
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(tdev->vdd)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index dcecd499d8df..be8f3283ee16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -10,6 +10,8 @@ nvkm-y += nvkm/subdev/clk/gf100.o
nvkm-y += nvkm/subdev/clk/gk104.o
nvkm-y += nvkm/subdev/clk/gk20a.o
nvkm-y += nvkm/subdev/clk/gm20b.o
+nvkm-y += nvkm/subdev/clk/gp10b.o
+nvkm-$(CONFIG_PM_DEVFREQ) += nvkm/subdev/clk/gk20a_devfreq.o
nvkm-y += nvkm/subdev/clk/pllnv04.o
nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index d573fb0917fc..65f5d0f1f3bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -23,6 +23,7 @@
*
*/
#include "priv.h"
+#include "gk20a_devfreq.h"
#include "gk20a.h"
#include <core/tegra.h>
@@ -589,6 +590,10 @@ gk20a_clk_init(struct nvkm_clk *base)
return ret;
}
+ ret = gk20a_devfreq_init(base, &clk->devfreq);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
index 286413ff4a9e..ea5b0bab4cce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -118,6 +118,7 @@ struct gk20a_clk {
const struct gk20a_clk_pllg_params *params;
struct gk20a_pll pll;
u32 parent_rate;
+ struct gk20a_devfreq *devfreq;
u32 (*div_to_pl)(u32);
u32 (*pl_to_div)(u32);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c
new file mode 100644
index 000000000000..41003cbcdbfa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: MIT
+#include <linux/clk.h>
+#include <linux/math64.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+
+#include <drm/drm_managed.h>
+
+#include <subdev/clk.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_chan.h"
+#include "priv.h"
+#include "gk20a_devfreq.h"
+#include "gk20a.h"
+#include "gp10b.h"
+
+#define PMU_BUSY_CYCLES_NORM_MAX 1000U
+
+#define PWR_PMU_IDLE_COUNTER_TOTAL 0U
+#define PWR_PMU_IDLE_COUNTER_BUSY 4U
+
+#define PWR_PMU_IDLE_COUNT_REG_OFFSET 0x0010A508U
+#define PWR_PMU_IDLE_COUNT_REG_SIZE 16U
+#define PWR_PMU_IDLE_COUNT_MASK 0x7FFFFFFFU
+#define PWR_PMU_IDLE_COUNT_RESET_VALUE (0x1U << 31U)
+
+#define PWR_PMU_IDLE_INTR_REG_OFFSET 0x0010A9E8U
+#define PWR_PMU_IDLE_INTR_ENABLE_VALUE 0U
+
+#define PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET 0x0010A9ECU
+#define PWR_PMU_IDLE_INTR_STATUS_MASK 0x00000001U
+#define PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE 0x1U
+
+#define PWR_PMU_IDLE_THRESHOLD_REG_OFFSET 0x0010A8A0U
+#define PWR_PMU_IDLE_THRESHOLD_REG_SIZE 4U
+#define PWR_PMU_IDLE_THRESHOLD_MAX_VALUE 0x7FFFFFFFU
+
+#define PWR_PMU_IDLE_CTRL_REG_OFFSET 0x0010A50CU
+#define PWR_PMU_IDLE_CTRL_REG_SIZE 16U
+#define PWR_PMU_IDLE_CTRL_VALUE_MASK 0x3U
+#define PWR_PMU_IDLE_CTRL_VALUE_BUSY 0x2U
+#define PWR_PMU_IDLE_CTRL_VALUE_ALWAYS 0x3U
+#define PWR_PMU_IDLE_CTRL_FILTER_MASK (0x1U << 2)
+#define PWR_PMU_IDLE_CTRL_FILTER_DISABLED 0x0U
+
+#define PWR_PMU_IDLE_MASK_REG_OFFSET 0x0010A504U
+#define PWR_PMU_IDLE_MASK_REG_SIZE 16U
+#define PWM_PMU_IDLE_MASK_GR_ENABLED 0x1U
+#define PWM_PMU_IDLE_MASK_CE_2_ENABLED 0x200000U
+
+/**
+ * struct gk20a_devfreq - Device frequency management
+ */
+struct gk20a_devfreq {
+ /** @devfreq: devfreq device. */
+ struct devfreq *devfreq;
+
+ /** @regs: Device registers. */
+ void __iomem *regs;
+
+ /** @gov_data: Governor data. */
+ struct devfreq_simple_ondemand_data gov_data;
+
+ /** @busy_time: Busy time. */
+ ktime_t busy_time;
+
+ /** @total_time: Total time. */
+ ktime_t total_time;
+
+ /** @time_last_update: Last update time. */
+ ktime_t time_last_update;
+};
+
+static struct gk20a_devfreq *dev_to_gk20a_devfreq(struct device *dev)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
+ struct nvkm_clk *base = nvkm_clk(subdev);
+
+ switch (drm->nvkm->chipset) {
+ case 0x13b: return gp10b_clk(base)->devfreq; break;
+ default: return gk20a_clk(base)->devfreq; break;
+ }
+}
+
+static void gk20a_pmu_init_perfmon_counter(struct gk20a_devfreq *gdevfreq)
+{
+ u32 data;
+
+ // Set pmu idle intr status bit on total counter overflow
+ writel(PWR_PMU_IDLE_INTR_ENABLE_VALUE,
+ gdevfreq->regs + PWR_PMU_IDLE_INTR_REG_OFFSET);
+
+ writel(PWR_PMU_IDLE_THRESHOLD_MAX_VALUE,
+ gdevfreq->regs + PWR_PMU_IDLE_THRESHOLD_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_THRESHOLD_REG_SIZE));
+
+ // Setup counter for total cycles
+ data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));
+ data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
+ data |= PWR_PMU_IDLE_CTRL_VALUE_ALWAYS | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
+ writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));
+
+ // Setup counter for busy cycles
+ writel(PWM_PMU_IDLE_MASK_GR_ENABLED | PWM_PMU_IDLE_MASK_CE_2_ENABLED,
+ gdevfreq->regs + PWR_PMU_IDLE_MASK_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_MASK_REG_SIZE));
+
+ data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
+ data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
+ data |= PWR_PMU_IDLE_CTRL_VALUE_BUSY | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
+ writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
+}
+
+static u32 gk20a_pmu_read_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
+{
+ u32 ret;
+
+ ret = readl(gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
+ (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));
+
+ return ret & PWR_PMU_IDLE_COUNT_MASK;
+}
+
+static void gk20a_pmu_reset_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
+{
+ writel(PWR_PMU_IDLE_COUNT_RESET_VALUE, gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
+ (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));
+}
+
+static u32 gk20a_pmu_read_idle_intr_status(struct gk20a_devfreq *gdevfreq)
+{
+ u32 ret;
+
+ ret = readl(gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);
+
+ return ret & PWR_PMU_IDLE_INTR_STATUS_MASK;
+}
+
+static void gk20a_pmu_clear_idle_intr_status(struct gk20a_devfreq *gdevfreq)
+{
+ writel(PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE,
+ gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);
+}
+
+static void gk20a_devfreq_update_utilization(struct gk20a_devfreq *gdevfreq)
+{
+ ktime_t now, last;
+ u64 busy_cycles, total_cycles;
+ u32 norm, intr_status;
+
+ now = ktime_get();
+ last = gdevfreq->time_last_update;
+ gdevfreq->total_time = ktime_us_delta(now, last);
+
+ busy_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
+ total_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
+ intr_status = gk20a_pmu_read_idle_intr_status(gdevfreq);
+
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
+
+ if (intr_status != 0UL) {
+ norm = PMU_BUSY_CYCLES_NORM_MAX;
+ gk20a_pmu_clear_idle_intr_status(gdevfreq);
+ } else if (total_cycles == 0ULL || busy_cycles > total_cycles) {
+ norm = PMU_BUSY_CYCLES_NORM_MAX;
+ } else {
+ norm = (u32)div64_u64(busy_cycles * PMU_BUSY_CYCLES_NORM_MAX,
+ total_cycles);
+ }
+
+ gdevfreq->busy_time = div_u64(gdevfreq->total_time * norm, PMU_BUSY_CYCLES_NORM_MAX);
+ gdevfreq->time_last_update = now;
+}
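+
+/*
+ * Worked numbers for the normalisation above, illustrative only:
+ *   busy_cycles = 300, total_cycles = 1000
+ *       -> norm = 300 * 1000 / 1000 = 300 (out of PMU_BUSY_CYCLES_NORM_MAX)
+ *   total_time = 50000 us since the last update
+ *       -> busy_time = 50000 * 300 / 1000 = 15000 us, i.e. a 30% load.
+ * A counter overflow (intr_status set) or busy > total clamps norm to
+ * 1000, so the period is reported as 100% busy.
+ */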
+
+static int gk20a_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
+ struct nvkm_clk *base = nvkm_clk(subdev);
+ struct nvkm_pstate *pstates = base->func->pstates;
+ int nr_pstates = base->func->nr_pstates;
+ int i, ret;
+
+ for (i = 0; i < nr_pstates - 1; i++)
+ if (pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV >= *freq)
+ break;
+
+ ret = nvkm_clk_ustate(base, pstates[i].pstate, 0);
+ ret |= nvkm_clk_ustate(base, pstates[i].pstate, 1);
+ if (ret) {
+ nvkm_error(subdev, "cannot update clock\n");
+ return ret;
+ }
+
+ *freq = pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
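+
+/*
+ * gk20a_devfreq_target() scans the pstate table from the lowest entry
+ * and programs the first pstate whose GPC frequency meets or exceeds
+ * the request, falling back to the last (highest) entry. Illustrative
+ * selection against the gp10b table added later in this series (table
+ * entries times GK20A_CLK_GPC_MDIV give the frequencies compared here):
+ *   request  600 MHz -> stops at the 624.75 MHz entry
+ *   request 1200 MHz -> stops at the 1236.75 MHz entry
+ *   request 1400 MHz -> no entry matches, last pstate (1300.5 MHz) used
+ */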
+
+static int gk20a_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
+ struct nvkm_clk *base = nvkm_clk(subdev);
+
+ *freq = nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static void gk20a_devfreq_reset(struct gk20a_devfreq *gdevfreq)
+{
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
+ gk20a_pmu_clear_idle_intr_status(gdevfreq);
+
+ gdevfreq->busy_time = 0;
+ gdevfreq->total_time = 0;
+ gdevfreq->time_last_update = ktime_get();
+}
+
+static int gk20a_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);
+
+ gk20a_devfreq_get_cur_freq(dev, &status->current_frequency);
+
+ gk20a_devfreq_update_utilization(gdevfreq);
+
+ status->busy_time = ktime_to_ns(gdevfreq->busy_time);
+ status->total_time = ktime_to_ns(gdevfreq->total_time);
+
+ gk20a_devfreq_reset(gdevfreq);
+
+ NV_DEBUG(drm, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ status->busy_time, status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile gk20a_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .polling_ms = 50,
+ .target = gk20a_devfreq_target,
+ .get_cur_freq = gk20a_devfreq_get_cur_freq,
+ .get_dev_status = gk20a_devfreq_get_dev_status,
+};
+
+int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **gdevfreq)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct nouveau_drm *drm = dev_get_drvdata(device->dev);
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct nvkm_pstate *pstates = base->func->pstates;
+ int nr_pstates = base->func->nr_pstates;
+ struct gk20a_devfreq *new_gdevfreq;
+ int i;
+
+ new_gdevfreq = drmm_kzalloc(drm->dev, sizeof(struct gk20a_devfreq), GFP_KERNEL);
+ if (!new_gdevfreq)
+ return -ENOMEM;
+
+ new_gdevfreq->regs = tdev->regs;
+
+ for (i = 0; i < nr_pstates; i++)
+ dev_pm_opp_add(base->subdev.device->dev,
+ pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV, 0);
+
+ gk20a_pmu_init_perfmon_counter(new_gdevfreq);
+ gk20a_devfreq_reset(new_gdevfreq);
+
+ gk20a_devfreq_profile.initial_freq =
+ nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;
+
+ new_gdevfreq->gov_data.upthreshold = 45;
+ new_gdevfreq->gov_data.downdifferential = 5;
+
+ new_gdevfreq->devfreq = devm_devfreq_add_device(device->dev,
+ &gk20a_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &new_gdevfreq->gov_data);
+ if (IS_ERR(new_gdevfreq->devfreq))
+ return PTR_ERR(new_gdevfreq->devfreq);
+
+ *gdevfreq = new_gdevfreq;
+
+ return 0;
+}
+
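+/*
+ * With upthreshold = 45 and downdifferential = 5, the simple_ondemand
+ * governor (behaviour paraphrased, not quoted from its source) pushes
+ * the device to its highest OPP once the reported load exceeds 45% and
+ * only scales back down after it falls below 40%. get_dev_status()
+ * above feeds it busy_time/total_time every polling_ms = 50 ms via the
+ * delayed timer. Roughly:
+ *   load = busy_time * 100 / total_time
+ *     load > 45%         -> highest frequency requested
+ *     40% <= load <= 45% -> frequency left unchanged
+ *     load < 40%         -> frequency scaled down towards the load
+ */
+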
+int gk20a_devfreq_resume(struct device *dev)
+{
+ struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);
+
+ if (!gdevfreq || !gdevfreq->devfreq)
+ return 0;
+
+ return devfreq_resume_device(gdevfreq->devfreq);
+}
+
+int gk20a_devfreq_suspend(struct device *dev)
+{
+ struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);
+
+ if (!gdevfreq || !gdevfreq->devfreq)
+ return 0;
+
+ return devfreq_suspend_device(gdevfreq->devfreq);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h
new file mode 100644
index 000000000000..5b7ca8a7a5cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __GK20A_DEVFREQ_H__
+#define __GK20A_DEVFREQ_H__
+
+#include <linux/devfreq.h>
+
+struct gk20a_devfreq;
+
+#if defined(CONFIG_PM_DEVFREQ)
+int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq);
+
+int gk20a_devfreq_resume(struct device *dev);
+int gk20a_devfreq_suspend(struct device *dev);
+#else
+static inline int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq)
+{
+ return 0;
+}
+
+static inline int gk20a_devfreq_resume(struct device *dev) { return 0; }
+static inline int gk20a_devfreq_suspend(struct device *dev) { return 0; }
+#endif /* CONFIG_PM_DEVFREQ */
+
+#endif /* __GK20A_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index 7c33542f651b..fa8ca53acbd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -27,6 +27,7 @@
#include <core/tegra.h>
#include "priv.h"
+#include "gk20a_devfreq.h"
#include "gk20a.h"
#define GPCPLL_CFG_SYNC_MODE BIT(2)
@@ -869,6 +870,10 @@ gm20b_clk_init(struct nvkm_clk *base)
return ret;
}
+ ret = gk20a_devfreq_init(base, &clk->devfreq);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c
new file mode 100644
index 000000000000..492b62c0ee96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: MIT
+#include <subdev/clk.h>
+#include <subdev/timer.h>
+#include <core/device.h>
+#include <core/tegra.h>
+
+#include "priv.h"
+#include "gk20a_devfreq.h"
+#include "gk20a.h"
+#include "gp10b.h"
+
+static int
+gp10b_clk_init(struct nvkm_clk *base)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ int ret;
+
+ /* Start with the highest frequency, matching the BPMP default */
+ base->func->calc(base, &base->func->pstates[base->func->nr_pstates - 1].base);
+ ret = base->func->prog(base);
+ if (ret) {
+ nvkm_error(subdev, "cannot initialize clock\n");
+ return ret;
+ }
+
+ ret = gk20a_devfreq_init(base, &clk->devfreq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+gp10b_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+
+ switch (src) {
+ case nv_clk_src_gpc:
+ return clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV;
+ default:
+ nvkm_error(subdev, "invalid clock source %d\n", src);
+ return -EINVAL;
+ }
+}
+
+static int
+gp10b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ u32 target_rate = cstate->domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV;
+
+ clk->new_rate = clk_round_rate(clk->clk, target_rate) / GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static int
+gp10b_clk_prog(struct nvkm_clk *base)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ int ret;
+
+ ret = clk_set_rate(clk->clk, clk->new_rate * GK20A_CLK_GPC_MDIV);
+ if (ret < 0)
+ return ret;
+
+ clk->rate = clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static struct nvkm_pstate
+gp10b_pstates[] = {
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 114750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 216750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 318750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 420750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 522750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 624750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 726750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 828750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 930750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1032750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1134750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1236750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1300500,
+ },
+ },
+};
+
+static const struct nvkm_clk_func
+gp10b_clk = {
+ .init = gp10b_clk_init,
+ .read = gp10b_clk_read,
+ .calc = gp10b_clk_calc,
+ .prog = gp10b_clk_prog,
+ .tidy = gk20a_clk_tidy,
+ .pstates = gp10b_pstates,
+ .nr_pstates = ARRAY_SIZE(gp10b_pstates),
+ .domains = {
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max }
+ }
+};
+
+int
+gp10b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
+{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ const struct nvkm_clk_func *func = &gp10b_clk;
+ struct gp10b_clk *clk;
+ int ret, i;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+ *pclk = &clk->base;
+ clk->clk = tdev->clk;
+
+ /* Finish initializing the pstates */
+ for (i = 0; i < func->nr_pstates; i++) {
+ INIT_LIST_HEAD(&func->pstates[i].list);
+ func->pstates[i].pstate = i + 1;
+ }
+
+ ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h
new file mode 100644
index 000000000000..178e3bcdbbf7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_CLK_GP10B_H__
+#define __NVKM_CLK_GP10B_H__
+
+struct gp10b_clk {
+ /* currently applied parameters */
+ struct nvkm_clk base;
+ struct gk20a_devfreq *devfreq;
+ struct clk *clk;
+ u32 rate;
+
+ /* new parameters to apply */
+ u32 new_rate;
+};
+
+#define gp10b_clk(p) container_of((p), struct gp10b_clk, base)
+
+#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 4dd05bc732da..195715b162e3 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -77,7 +77,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct omap_dss_device *output = omap_encoder->output;
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
- struct drm_bridge *bridge;
struct videomode vm = { 0 };
u32 bus_flags;
@@ -97,8 +96,7 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
- for (bridge = output->bridge; bridge;
- bridge = drm_bridge_get_next_bridge(bridge)) {
+ drm_for_each_bridge_in_chain_from(output->bridge, bridge) {
if (!bridge->timings)
continue;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 381552bfb409..78563a8d8732 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
@@ -580,15 +581,13 @@ static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- union omap_gem_size gsize;
-
- args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
-
- args->size = PAGE_ALIGN(args->pitch * args->height);
+ union omap_gem_size gsize = { };
+ int ret;
- gsize = (union omap_gem_size){
- .bytes = args->size,
- };
+ ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
+ if (ret)
+ return ret;
+ gsize.bytes = args->size;
return omap_gem_new_handle(dev, file, gsize,
OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 24a2ded08b45..d74ef6694c10 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -229,7 +229,7 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 407c5f6a268b..045ffb2ccd0f 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -888,6 +888,21 @@ config DRM_PANEL_SEIKO_43WVF1G
Say Y here if you want to enable support for the Seiko
43WVF1G controller for 800x480 LCD panels
+config DRM_PANEL_SHARP_LQ079L1SX01
+ tristate "Sharp LQ079L1SX01 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable support for Sharp LQ079L1SX01
+ TFT-LCD modules. The panel has a 2560x1600 resolution and uses
+ 24 bit RGB per pixel. It provides a dual MIPI DSI interface to
+ the host.
+
+ To compile this driver as a module, choose M here: the module
+ will be called panel-sharp-lq079l1sx01.
+
config DRM_PANEL_SHARP_LQ101R1SX01
tristate "Sharp LQ101R1SX01 panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 3615a761b44f..0356775a443a 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01) += panel-samsung-s6e8aa5x01-ams561ra01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LQ079L1SX01) += panel-sharp-lq079l1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 62435e3cd9f4..944c7c70de55 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1888,6 +1888,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x203d, &delay_200_500_e50, "B140HTN02.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x205c, &delay_200_500_e50, "B116XAN02.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x208d, &delay_200_500_e50, "B140HTN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
@@ -1909,6 +1910,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xb7a9, &delay_200_500_e50, "B140HAK03.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xcdba, &delay_200_500_e50, "B140UAX01.2"),
@@ -1974,6 +1976,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf2, &delay_200_500_e200, "NV156FHM-N4S"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"),
@@ -2007,10 +2010,12 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1441, &delay_200_500_e80_d50, "N140JCA-ELK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x148f, &delay_200_500_e80, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1565, &delay_200_500_e80, "N156HCA-EAB"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x7402, &delay_200_500_e200_d50, "N116BCA-EAK"),
@@ -2022,10 +2027,12 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x143f, &delay_200_500_e50, "MNE007QS3-6"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x144b, &delay_200_500_e80, "MNE001BS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x146e, &delay_80_500_e50_d50, "MNE007QB3-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1519, &delay_200_500_e80_d50, "MNF601BS1-3"),
EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
@@ -2046,6 +2053,8 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1212, &delay_200_500_e50, "KD116N0930A16"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1707, &delay_200_150_e50, "KD116N2130B12"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x0110, &delay_200_500_e50, "KD116N3730A07"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x0397, &delay_200_500_e50, "KD116N3730A12"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index ad4993b2f92a..7ecb81225981 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -100,7 +100,7 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x13, 0x00),
ILI9881C_COMMAND_INSTR(0x14, 0x00),
ILI9881C_COMMAND_INSTR(0x15, 0x00),
- ILI9881C_COMMAND_INSTR(0x16, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x16, 0x0c),
ILI9881C_COMMAND_INSTR(0x17, 0x00),
ILI9881C_COMMAND_INSTR(0x18, 0x00),
ILI9881C_COMMAND_INSTR(0x19, 0x00),
@@ -108,7 +108,7 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x1b, 0x00),
ILI9881C_COMMAND_INSTR(0x1c, 0x00),
ILI9881C_COMMAND_INSTR(0x1d, 0x00),
- ILI9881C_COMMAND_INSTR(0x1e, 0xC0),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
ILI9881C_COMMAND_INSTR(0x1f, 0x80),
ILI9881C_COMMAND_INSTR(0x20, 0x04),
ILI9881C_COMMAND_INSTR(0x21, 0x01),
@@ -134,7 +134,7 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x35, 0x00),
ILI9881C_COMMAND_INSTR(0x36, 0x00),
ILI9881C_COMMAND_INSTR(0x37, 0x00),
- ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
ILI9881C_COMMAND_INSTR(0x3a, 0x00),
ILI9881C_COMMAND_INSTR(0x3b, 0x00),
@@ -173,11 +173,11 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x67, 0x02),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
ILI9881C_COMMAND_INSTR(0x69, 0x02),
- ILI9881C_COMMAND_INSTR(0x6a, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x0c),
ILI9881C_COMMAND_INSTR(0x6b, 0x02),
- ILI9881C_COMMAND_INSTR(0x6c, 0x0F),
- ILI9881C_COMMAND_INSTR(0x6d, 0x0E),
- ILI9881C_COMMAND_INSTR(0x6e, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x0d),
ILI9881C_COMMAND_INSTR(0x6f, 0x06),
ILI9881C_COMMAND_INSTR(0x70, 0x07),
ILI9881C_COMMAND_INSTR(0x71, 0x02),
@@ -195,74 +195,74 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x7d, 0x02),
ILI9881C_COMMAND_INSTR(0x7e, 0x02),
ILI9881C_COMMAND_INSTR(0x7f, 0x02),
- ILI9881C_COMMAND_INSTR(0x80, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x80, 0x0c),
ILI9881C_COMMAND_INSTR(0x81, 0x02),
- ILI9881C_COMMAND_INSTR(0x82, 0x0F),
- ILI9881C_COMMAND_INSTR(0x83, 0x0E),
- ILI9881C_COMMAND_INSTR(0x84, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x82, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x83, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x84, 0x0d),
ILI9881C_COMMAND_INSTR(0x85, 0x06),
ILI9881C_COMMAND_INSTR(0x86, 0x07),
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
- ILI9881C_COMMAND_INSTR(0x6C, 0x15),
- ILI9881C_COMMAND_INSTR(0x6E, 0x22),
- ILI9881C_COMMAND_INSTR(0x6F, 0x33),
- ILI9881C_COMMAND_INSTR(0x3A, 0xA4),
- ILI9881C_COMMAND_INSTR(0x8D, 0x0D),
- ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x22),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3a, 0xa4),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
ILI9881C_COMMAND_INSTR(0x26, 0x76),
- ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
- ILI9881C_COMMAND_INSTR(0x53, 0xDC),
- ILI9881C_COMMAND_INSTR(0x55, 0xA7),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x53, 0xdc),
+ ILI9881C_COMMAND_INSTR(0x55, 0xa7),
ILI9881C_COMMAND_INSTR(0x50, 0x78),
ILI9881C_COMMAND_INSTR(0x51, 0x78),
ILI9881C_COMMAND_INSTR(0x31, 0x02),
ILI9881C_COMMAND_INSTR(0x60, 0x14),
- ILI9881C_COMMAND_INSTR(0xA0, 0x2A),
- ILI9881C_COMMAND_INSTR(0xA1, 0x39),
- ILI9881C_COMMAND_INSTR(0xA2, 0x46),
- ILI9881C_COMMAND_INSTR(0xA3, 0x0e),
- ILI9881C_COMMAND_INSTR(0xA4, 0x12),
- ILI9881C_COMMAND_INSTR(0xA5, 0x25),
- ILI9881C_COMMAND_INSTR(0xA6, 0x19),
- ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
- ILI9881C_COMMAND_INSTR(0xA8, 0xa6),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
- ILI9881C_COMMAND_INSTR(0xAA, 0x29),
- ILI9881C_COMMAND_INSTR(0xAB, 0x85),
- ILI9881C_COMMAND_INSTR(0xAC, 0x1C),
- ILI9881C_COMMAND_INSTR(0xAD, 0x1B),
- ILI9881C_COMMAND_INSTR(0xAE, 0x51),
- ILI9881C_COMMAND_INSTR(0xAF, 0x22),
- ILI9881C_COMMAND_INSTR(0xB0, 0x2d),
- ILI9881C_COMMAND_INSTR(0xB1, 0x4f),
- ILI9881C_COMMAND_INSTR(0xB2, 0x59),
- ILI9881C_COMMAND_INSTR(0xB3, 0x3F),
- ILI9881C_COMMAND_INSTR(0xC0, 0x2A),
- ILI9881C_COMMAND_INSTR(0xC1, 0x3a),
- ILI9881C_COMMAND_INSTR(0xC2, 0x45),
- ILI9881C_COMMAND_INSTR(0xC3, 0x0e),
- ILI9881C_COMMAND_INSTR(0xC4, 0x11),
- ILI9881C_COMMAND_INSTR(0xC5, 0x24),
- ILI9881C_COMMAND_INSTR(0xC6, 0x1a),
- ILI9881C_COMMAND_INSTR(0xC7, 0x1c),
- ILI9881C_COMMAND_INSTR(0xC8, 0xaa),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
- ILI9881C_COMMAND_INSTR(0xCA, 0x29),
- ILI9881C_COMMAND_INSTR(0xCB, 0x96),
- ILI9881C_COMMAND_INSTR(0xCC, 0x1C),
- ILI9881C_COMMAND_INSTR(0xCD, 0x1B),
- ILI9881C_COMMAND_INSTR(0xCE, 0x51),
- ILI9881C_COMMAND_INSTR(0xCF, 0x22),
- ILI9881C_COMMAND_INSTR(0xD0, 0x2b),
- ILI9881C_COMMAND_INSTR(0xD1, 0x4b),
- ILI9881C_COMMAND_INSTR(0xD2, 0x59),
- ILI9881C_COMMAND_INSTR(0xD3, 0x3F),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x39),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x46),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x12),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x25),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x19),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa8, 0xa6),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29),
+ ILI9881C_COMMAND_INSTR(0xab, 0x85),
+ ILI9881C_COMMAND_INSTR(0xac, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xad, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xae, 0x51),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x22),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x2d),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x4f),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x3f),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x3a),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x45),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x11),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x24),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc8, 0xaa),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xca, 0x29),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x96),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xce, 0x51),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x22),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x3f),
};
static const struct ili9881c_instr k101_im2byl02_init[] = {
@@ -276,12 +276,12 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x07, 0x00),
ILI9881C_COMMAND_INSTR(0x08, 0x00),
ILI9881C_COMMAND_INSTR(0x09, 0x00),
- ILI9881C_COMMAND_INSTR(0x0A, 0x01),
- ILI9881C_COMMAND_INSTR(0x0B, 0x01),
- ILI9881C_COMMAND_INSTR(0x0C, 0x00),
- ILI9881C_COMMAND_INSTR(0x0D, 0x01),
- ILI9881C_COMMAND_INSTR(0x0E, 0x01),
- ILI9881C_COMMAND_INSTR(0x0F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
ILI9881C_COMMAND_INSTR(0x10, 0x00),
ILI9881C_COMMAND_INSTR(0x11, 0x00),
ILI9881C_COMMAND_INSTR(0x12, 0x00),
@@ -292,12 +292,12 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x17, 0x00),
ILI9881C_COMMAND_INSTR(0x18, 0x00),
ILI9881C_COMMAND_INSTR(0x19, 0x00),
- ILI9881C_COMMAND_INSTR(0x1A, 0x00),
- ILI9881C_COMMAND_INSTR(0x1B, 0x00),
- ILI9881C_COMMAND_INSTR(0x1C, 0x00),
- ILI9881C_COMMAND_INSTR(0x1D, 0x00),
- ILI9881C_COMMAND_INSTR(0x1E, 0x40),
- ILI9881C_COMMAND_INSTR(0x1F, 0xC0),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0xc0),
ILI9881C_COMMAND_INSTR(0x20, 0x06),
ILI9881C_COMMAND_INSTR(0x21, 0x01),
ILI9881C_COMMAND_INSTR(0x22, 0x06),
@@ -306,14 +306,14 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x25, 0x88),
ILI9881C_COMMAND_INSTR(0x26, 0x00),
ILI9881C_COMMAND_INSTR(0x27, 0x00),
- ILI9881C_COMMAND_INSTR(0x28, 0x3B),
+ ILI9881C_COMMAND_INSTR(0x28, 0x3b),
ILI9881C_COMMAND_INSTR(0x29, 0x03),
- ILI9881C_COMMAND_INSTR(0x2A, 0x00),
- ILI9881C_COMMAND_INSTR(0x2B, 0x00),
- ILI9881C_COMMAND_INSTR(0x2C, 0x00),
- ILI9881C_COMMAND_INSTR(0x2D, 0x00),
- ILI9881C_COMMAND_INSTR(0x2E, 0x00),
- ILI9881C_COMMAND_INSTR(0x2F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
ILI9881C_COMMAND_INSTR(0x30, 0x00),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x32, 0x00),
@@ -324,12 +324,12 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x37, 0x00),
ILI9881C_COMMAND_INSTR(0x38, 0x00),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
- ILI9881C_COMMAND_INSTR(0x3A, 0x00),
- ILI9881C_COMMAND_INSTR(0x3B, 0x00),
- ILI9881C_COMMAND_INSTR(0x3C, 0x00),
- ILI9881C_COMMAND_INSTR(0x3D, 0x00),
- ILI9881C_COMMAND_INSTR(0x3E, 0x00),
- ILI9881C_COMMAND_INSTR(0x3F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
ILI9881C_COMMAND_INSTR(0x40, 0x00),
ILI9881C_COMMAND_INSTR(0x41, 0x00),
ILI9881C_COMMAND_INSTR(0x42, 0x00),
@@ -340,17 +340,17 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x52, 0x45),
ILI9881C_COMMAND_INSTR(0x53, 0x67),
ILI9881C_COMMAND_INSTR(0x54, 0x89),
- ILI9881C_COMMAND_INSTR(0x55, 0xAB),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
ILI9881C_COMMAND_INSTR(0x56, 0x01),
ILI9881C_COMMAND_INSTR(0x57, 0x23),
ILI9881C_COMMAND_INSTR(0x58, 0x45),
ILI9881C_COMMAND_INSTR(0x59, 0x67),
- ILI9881C_COMMAND_INSTR(0x5A, 0x89),
- ILI9881C_COMMAND_INSTR(0x5B, 0xAB),
- ILI9881C_COMMAND_INSTR(0x5C, 0xCD),
- ILI9881C_COMMAND_INSTR(0x5D, 0xEF),
- ILI9881C_COMMAND_INSTR(0x5E, 0x00),
- ILI9881C_COMMAND_INSTR(0x5F, 0x01),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x01),
ILI9881C_COMMAND_INSTR(0x60, 0x01),
ILI9881C_COMMAND_INSTR(0x61, 0x06),
ILI9881C_COMMAND_INSTR(0x62, 0x06),
@@ -361,101 +361,101 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x67, 0x02),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
ILI9881C_COMMAND_INSTR(0x69, 0x05),
- ILI9881C_COMMAND_INSTR(0x6A, 0x05),
- ILI9881C_COMMAND_INSTR(0x6B, 0x02),
- ILI9881C_COMMAND_INSTR(0x6C, 0x0D),
- ILI9881C_COMMAND_INSTR(0x6D, 0x0D),
- ILI9881C_COMMAND_INSTR(0x6E, 0x0C),
- ILI9881C_COMMAND_INSTR(0x6F, 0x0C),
- ILI9881C_COMMAND_INSTR(0x70, 0x0F),
- ILI9881C_COMMAND_INSTR(0x71, 0x0F),
- ILI9881C_COMMAND_INSTR(0x72, 0x0E),
- ILI9881C_COMMAND_INSTR(0x73, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x05),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x70, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x71, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x72, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x73, 0x0e),
ILI9881C_COMMAND_INSTR(0x74, 0x02),
ILI9881C_COMMAND_INSTR(0x75, 0x01),
ILI9881C_COMMAND_INSTR(0x76, 0x01),
ILI9881C_COMMAND_INSTR(0x77, 0x06),
ILI9881C_COMMAND_INSTR(0x78, 0x06),
ILI9881C_COMMAND_INSTR(0x79, 0x07),
- ILI9881C_COMMAND_INSTR(0x7A, 0x07),
- ILI9881C_COMMAND_INSTR(0x7B, 0x00),
- ILI9881C_COMMAND_INSTR(0x7C, 0x00),
- ILI9881C_COMMAND_INSTR(0x7D, 0x02),
- ILI9881C_COMMAND_INSTR(0x7E, 0x02),
- ILI9881C_COMMAND_INSTR(0x7F, 0x05),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x05),
ILI9881C_COMMAND_INSTR(0x80, 0x05),
ILI9881C_COMMAND_INSTR(0x81, 0x02),
- ILI9881C_COMMAND_INSTR(0x82, 0x0D),
- ILI9881C_COMMAND_INSTR(0x83, 0x0D),
- ILI9881C_COMMAND_INSTR(0x84, 0x0C),
- ILI9881C_COMMAND_INSTR(0x85, 0x0C),
- ILI9881C_COMMAND_INSTR(0x86, 0x0F),
- ILI9881C_COMMAND_INSTR(0x87, 0x0F),
- ILI9881C_COMMAND_INSTR(0x88, 0x0E),
- ILI9881C_COMMAND_INSTR(0x89, 0x0E),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x83, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x84, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x85, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x86, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x87, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x88, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x89, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
- ILI9881C_COMMAND_INSTR(0x3B, 0xC0), /* ILI4003D sel */
- ILI9881C_COMMAND_INSTR(0x6C, 0x15), /* Set VCORE voltage = 1.5V */
- ILI9881C_COMMAND_INSTR(0x6E, 0x2A), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */
- ILI9881C_COMMAND_INSTR(0x6F, 0x33), /* pumping ratio VGH=5x VGL=-3x */
- ILI9881C_COMMAND_INSTR(0x8D, 0x1B), /* VGL clamp -10V */
- ILI9881C_COMMAND_INSTR(0x87, 0xBA), /* ESD */
- ILI9881C_COMMAND_INSTR(0x3A, 0x24), /* POWER SAVING */
+ ILI9881C_COMMAND_INSTR(0x3b, 0xc0), /* ILI4003D sel */
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15), /* Set VCORE voltage = 1.5V */
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33), /* pumping ratio VGH=5x VGL=-3x */
+ ILI9881C_COMMAND_INSTR(0x8d, 0x1b), /* VGL clamp -10V */
+ ILI9881C_COMMAND_INSTR(0x87, 0xba), /* ESD */
+ ILI9881C_COMMAND_INSTR(0x3a, 0x24), /* POWER SAVING */
ILI9881C_COMMAND_INSTR(0x26, 0x76),
- ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A), /* BGR, SS */
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a), /* BGR, SS */
ILI9881C_COMMAND_INSTR(0x31, 0x00), /* Zigzag type3 inversion */
ILI9881C_COMMAND_INSTR(0x40, 0x53), /* ILI4003D sel */
ILI9881C_COMMAND_INSTR(0x43, 0x66),
- ILI9881C_COMMAND_INSTR(0x53, 0x4C),
+ ILI9881C_COMMAND_INSTR(0x53, 0x4c),
ILI9881C_COMMAND_INSTR(0x50, 0x87),
ILI9881C_COMMAND_INSTR(0x51, 0x82),
ILI9881C_COMMAND_INSTR(0x60, 0x15),
ILI9881C_COMMAND_INSTR(0x61, 0x01),
- ILI9881C_COMMAND_INSTR(0x62, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0c),
ILI9881C_COMMAND_INSTR(0x63, 0x00),
- ILI9881C_COMMAND_INSTR(0xA0, 0x00),
- ILI9881C_COMMAND_INSTR(0xA1, 0x13), /* VP251 */
- ILI9881C_COMMAND_INSTR(0xA2, 0x23), /* VP247 */
- ILI9881C_COMMAND_INSTR(0xA3, 0x14), /* VP243 */
- ILI9881C_COMMAND_INSTR(0xA4, 0x16), /* VP239 */
- ILI9881C_COMMAND_INSTR(0xA5, 0x29), /* VP231 */
- ILI9881C_COMMAND_INSTR(0xA6, 0x1E), /* VP219 */
- ILI9881C_COMMAND_INSTR(0xA7, 0x1D), /* VP203 */
- ILI9881C_COMMAND_INSTR(0xA8, 0x86), /* VP175 */
- ILI9881C_COMMAND_INSTR(0xA9, 0x1E), /* VP144 */
- ILI9881C_COMMAND_INSTR(0xAA, 0x29), /* VP111 */
- ILI9881C_COMMAND_INSTR(0xAB, 0x74), /* VP80 */
- ILI9881C_COMMAND_INSTR(0xAC, 0x19), /* VP52 */
- ILI9881C_COMMAND_INSTR(0xAD, 0x17), /* VP36 */
- ILI9881C_COMMAND_INSTR(0xAE, 0x4B), /* VP24 */
- ILI9881C_COMMAND_INSTR(0xAF, 0x20), /* VP16 */
- ILI9881C_COMMAND_INSTR(0xB0, 0x26), /* VP12 */
- ILI9881C_COMMAND_INSTR(0xB1, 0x4C), /* VP8 */
- ILI9881C_COMMAND_INSTR(0xB2, 0x5D), /* VP4 */
- ILI9881C_COMMAND_INSTR(0xB3, 0x3F), /* VP0 */
- ILI9881C_COMMAND_INSTR(0xC0, 0x00), /* VN255 GAMMA N */
- ILI9881C_COMMAND_INSTR(0xC1, 0x13), /* VN251 */
- ILI9881C_COMMAND_INSTR(0xC2, 0x23), /* VN247 */
- ILI9881C_COMMAND_INSTR(0xC3, 0x14), /* VN243 */
- ILI9881C_COMMAND_INSTR(0xC4, 0x16), /* VN239 */
- ILI9881C_COMMAND_INSTR(0xC5, 0x29), /* VN231 */
- ILI9881C_COMMAND_INSTR(0xC6, 0x1E), /* VN219 */
- ILI9881C_COMMAND_INSTR(0xC7, 0x1D), /* VN203 */
- ILI9881C_COMMAND_INSTR(0xC8, 0x86), /* VN175 */
- ILI9881C_COMMAND_INSTR(0xC9, 0x1E), /* VN144 */
- ILI9881C_COMMAND_INSTR(0xCA, 0x29), /* VN111 */
- ILI9881C_COMMAND_INSTR(0xCB, 0x74), /* VN80 */
- ILI9881C_COMMAND_INSTR(0xCC, 0x19), /* VN52 */
- ILI9881C_COMMAND_INSTR(0xCD, 0x17), /* VN36 */
- ILI9881C_COMMAND_INSTR(0xCE, 0x4B), /* VN24 */
- ILI9881C_COMMAND_INSTR(0xCF, 0x20), /* VN16 */
- ILI9881C_COMMAND_INSTR(0xD0, 0x26), /* VN12 */
- ILI9881C_COMMAND_INSTR(0xD1, 0x4C), /* VN8 */
- ILI9881C_COMMAND_INSTR(0xD2, 0x5D), /* VN4 */
- ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */
+ ILI9881C_COMMAND_INSTR(0xa0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x13), /* VP251 */
+ ILI9881C_COMMAND_INSTR(0xa2, 0x23), /* VP247 */
+ ILI9881C_COMMAND_INSTR(0xa3, 0x14), /* VP243 */
+ ILI9881C_COMMAND_INSTR(0xa4, 0x16), /* VP239 */
+ ILI9881C_COMMAND_INSTR(0xa5, 0x29), /* VP231 */
+ ILI9881C_COMMAND_INSTR(0xa6, 0x1e), /* VP219 */
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1d), /* VP203 */
+ ILI9881C_COMMAND_INSTR(0xa8, 0x86), /* VP175 */
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1e), /* VP144 */
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29), /* VP111 */
+ ILI9881C_COMMAND_INSTR(0xab, 0x74), /* VP80 */
+ ILI9881C_COMMAND_INSTR(0xac, 0x19), /* VP52 */
+ ILI9881C_COMMAND_INSTR(0xad, 0x17), /* VP36 */
+ ILI9881C_COMMAND_INSTR(0xae, 0x4b), /* VP24 */
+ ILI9881C_COMMAND_INSTR(0xaf, 0x20), /* VP16 */
+ ILI9881C_COMMAND_INSTR(0xb0, 0x26), /* VP12 */
+ ILI9881C_COMMAND_INSTR(0xb1, 0x4c), /* VP8 */
+ ILI9881C_COMMAND_INSTR(0xb2, 0x5d), /* VP4 */
+ ILI9881C_COMMAND_INSTR(0xb3, 0x3f), /* VP0 */
+ ILI9881C_COMMAND_INSTR(0xc0, 0x00), /* VN255 GAMMA N */
+ ILI9881C_COMMAND_INSTR(0xc1, 0x13), /* VN251 */
+ ILI9881C_COMMAND_INSTR(0xc2, 0x23), /* VN247 */
+ ILI9881C_COMMAND_INSTR(0xc3, 0x14), /* VN243 */
+ ILI9881C_COMMAND_INSTR(0xc4, 0x16), /* VN239 */
+ ILI9881C_COMMAND_INSTR(0xc5, 0x29), /* VN231 */
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1e), /* VN219 */
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1d), /* VN203 */
+ ILI9881C_COMMAND_INSTR(0xc8, 0x86), /* VN175 */
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1e), /* VN144 */
+ ILI9881C_COMMAND_INSTR(0xca, 0x29), /* VN111 */
+ ILI9881C_COMMAND_INSTR(0xcb, 0x74), /* VN80 */
+ ILI9881C_COMMAND_INSTR(0xcc, 0x19), /* VN52 */
+ ILI9881C_COMMAND_INSTR(0xcd, 0x17), /* VN36 */
+ ILI9881C_COMMAND_INSTR(0xce, 0x4b), /* VN24 */
+ ILI9881C_COMMAND_INSTR(0xcf, 0x20), /* VN16 */
+ ILI9881C_COMMAND_INSTR(0xd0, 0x26), /* VN12 */
+ ILI9881C_COMMAND_INSTR(0xd1, 0x4c), /* VN8 */
+ ILI9881C_COMMAND_INSTR(0xd2, 0x5d), /* VN4 */
+ ILI9881C_COMMAND_INSTR(0xd3, 0x3f), /* VN0 */
};
static const struct ili9881c_instr kd050hdfia020_init[] = {
@@ -517,7 +517,7 @@ static const struct ili9881c_instr kd050hdfia020_init[] = {
ILI9881C_COMMAND_INSTR(0x35, 0x00),
ILI9881C_COMMAND_INSTR(0x36, 0x00),
ILI9881C_COMMAND_INSTR(0x37, 0x00),
- ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
ILI9881C_COMMAND_INSTR(0x3a, 0x40),
ILI9881C_COMMAND_INSTR(0x3b, 0x40),
@@ -549,10 +549,10 @@ static const struct ili9881c_instr kd050hdfia020_init[] = {
ILI9881C_COMMAND_INSTR(0x60, 0x00),
ILI9881C_COMMAND_INSTR(0x61, 0x15),
ILI9881C_COMMAND_INSTR(0x62, 0x14),
- ILI9881C_COMMAND_INSTR(0x63, 0x0E),
- ILI9881C_COMMAND_INSTR(0x64, 0x0F),
- ILI9881C_COMMAND_INSTR(0x65, 0x0C),
- ILI9881C_COMMAND_INSTR(0x66, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x66, 0x0d),
ILI9881C_COMMAND_INSTR(0x67, 0x06),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
ILI9881C_COMMAND_INSTR(0x69, 0x07),
@@ -571,10 +571,10 @@ static const struct ili9881c_instr kd050hdfia020_init[] = {
ILI9881C_COMMAND_INSTR(0x76, 0x00),
ILI9881C_COMMAND_INSTR(0x77, 0x14),
ILI9881C_COMMAND_INSTR(0x78, 0x15),
- ILI9881C_COMMAND_INSTR(0x79, 0x0E),
- ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
- ILI9881C_COMMAND_INSTR(0x7b, 0x0C),
- ILI9881C_COMMAND_INSTR(0x7c, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x0d),
ILI9881C_COMMAND_INSTR(0x7d, 0x06),
ILI9881C_COMMAND_INSTR(0x7e, 0x02),
ILI9881C_COMMAND_INSTR(0x7f, 0x07),
@@ -587,71 +587,71 @@ static const struct ili9881c_instr kd050hdfia020_init[] = {
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(0x4),
- ILI9881C_COMMAND_INSTR(0x6C, 0x15),
- ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
- ILI9881C_COMMAND_INSTR(0x6F, 0x33),
- ILI9881C_COMMAND_INSTR(0x3A, 0x94),
- ILI9881C_COMMAND_INSTR(0x8D, 0x15),
- ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x94),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x15),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
ILI9881C_COMMAND_INSTR(0x26, 0x76),
- ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
- ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_COMMAND_INSTR(0xb5, 0x06),
ILI9881C_SWITCH_PAGE_INSTR(0x1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x53, 0x90),
- ILI9881C_COMMAND_INSTR(0x55, 0xA2),
- ILI9881C_COMMAND_INSTR(0x50, 0xB7),
- ILI9881C_COMMAND_INSTR(0x51, 0xB7),
+ ILI9881C_COMMAND_INSTR(0x55, 0xa2),
+ ILI9881C_COMMAND_INSTR(0x50, 0xb7),
+ ILI9881C_COMMAND_INSTR(0x51, 0xb7),
ILI9881C_COMMAND_INSTR(0x60, 0x22),
ILI9881C_COMMAND_INSTR(0x61, 0x00),
ILI9881C_COMMAND_INSTR(0x62, 0x19),
ILI9881C_COMMAND_INSTR(0x63, 0x10),
- ILI9881C_COMMAND_INSTR(0xA0, 0x08),
- ILI9881C_COMMAND_INSTR(0xA1, 0x1A),
- ILI9881C_COMMAND_INSTR(0xA2, 0x27),
- ILI9881C_COMMAND_INSTR(0xA3, 0x15),
- ILI9881C_COMMAND_INSTR(0xA4, 0x17),
- ILI9881C_COMMAND_INSTR(0xA5, 0x2A),
- ILI9881C_COMMAND_INSTR(0xA6, 0x1E),
- ILI9881C_COMMAND_INSTR(0xA7, 0x1F),
- ILI9881C_COMMAND_INSTR(0xA8, 0x8B),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1B),
- ILI9881C_COMMAND_INSTR(0xAA, 0x27),
- ILI9881C_COMMAND_INSTR(0xAB, 0x78),
- ILI9881C_COMMAND_INSTR(0xAC, 0x18),
- ILI9881C_COMMAND_INSTR(0xAD, 0x18),
- ILI9881C_COMMAND_INSTR(0xAE, 0x4C),
- ILI9881C_COMMAND_INSTR(0xAF, 0x21),
- ILI9881C_COMMAND_INSTR(0xB0, 0x27),
- ILI9881C_COMMAND_INSTR(0xB1, 0x54),
- ILI9881C_COMMAND_INSTR(0xB2, 0x67),
- ILI9881C_COMMAND_INSTR(0xB3, 0x39),
- ILI9881C_COMMAND_INSTR(0xC0, 0x08),
- ILI9881C_COMMAND_INSTR(0xC1, 0x1A),
- ILI9881C_COMMAND_INSTR(0xC2, 0x27),
- ILI9881C_COMMAND_INSTR(0xC3, 0x15),
- ILI9881C_COMMAND_INSTR(0xC4, 0x17),
- ILI9881C_COMMAND_INSTR(0xC5, 0x2A),
- ILI9881C_COMMAND_INSTR(0xC6, 0x1E),
- ILI9881C_COMMAND_INSTR(0xC7, 0x1F),
- ILI9881C_COMMAND_INSTR(0xC8, 0x8B),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1B),
- ILI9881C_COMMAND_INSTR(0xCA, 0x27),
- ILI9881C_COMMAND_INSTR(0xCB, 0x78),
- ILI9881C_COMMAND_INSTR(0xCC, 0x18),
- ILI9881C_COMMAND_INSTR(0xCD, 0x18),
- ILI9881C_COMMAND_INSTR(0xCE, 0x4C),
- ILI9881C_COMMAND_INSTR(0xCF, 0x21),
- ILI9881C_COMMAND_INSTR(0xD0, 0x27),
- ILI9881C_COMMAND_INSTR(0xD1, 0x54),
- ILI9881C_COMMAND_INSTR(0xD2, 0x67),
- ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x27),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x17),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x8b),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x27),
+ ILI9881C_COMMAND_INSTR(0xab, 0x78),
+ ILI9881C_COMMAND_INSTR(0xac, 0x18),
+ ILI9881C_COMMAND_INSTR(0xad, 0x18),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4c),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x21),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x54),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x67),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x27),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x17),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x8b),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xca, 0x27),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x78),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x18),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x18),
+ ILI9881C_COMMAND_INSTR(0xce, 0x4c),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x21),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x54),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x67),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x39),
ILI9881C_SWITCH_PAGE_INSTR(0),
ILI9881C_COMMAND_INSTR(0x35, 0x00),
- ILI9881C_COMMAND_INSTR(0x3A, 0x7),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x7),
};
static const struct ili9881c_instr tl050hdv35_init[] = {
@@ -696,7 +696,7 @@ static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_COMMAND_INSTR(0x35, 0x00),
ILI9881C_COMMAND_INSTR(0x36, 0x00),
ILI9881C_COMMAND_INSTR(0x37, 0x00),
- ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
ILI9881C_COMMAND_INSTR(0x3a, 0x40),
ILI9881C_COMMAND_INSTR(0x3b, 0x40),
@@ -750,7 +750,7 @@ static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_COMMAND_INSTR(0x7f, 0x07),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
ILI9881C_COMMAND_INSTR(0x38, 0x01),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
@@ -831,12 +831,12 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x07, 0x02),
ILI9881C_COMMAND_INSTR(0x08, 0x02),
ILI9881C_COMMAND_INSTR(0x09, 0x00),
- ILI9881C_COMMAND_INSTR(0x0A, 0x00),
- ILI9881C_COMMAND_INSTR(0x0B, 0x00),
- ILI9881C_COMMAND_INSTR(0x0C, 0x00),
- ILI9881C_COMMAND_INSTR(0x0D, 0x00),
- ILI9881C_COMMAND_INSTR(0x0E, 0x00),
- ILI9881C_COMMAND_INSTR(0x0F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
ILI9881C_COMMAND_INSTR(0x10, 0x00),
ILI9881C_COMMAND_INSTR(0x11, 0x00),
@@ -848,12 +848,12 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x17, 0x00),
ILI9881C_COMMAND_INSTR(0x18, 0x08),
ILI9881C_COMMAND_INSTR(0x19, 0x00),
- ILI9881C_COMMAND_INSTR(0x1A, 0x00),
- ILI9881C_COMMAND_INSTR(0x1B, 0x00),
- ILI9881C_COMMAND_INSTR(0x1C, 0x00),
- ILI9881C_COMMAND_INSTR(0x1D, 0x00),
- ILI9881C_COMMAND_INSTR(0x1E, 0xC0),
- ILI9881C_COMMAND_INSTR(0x1F, 0x80),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
ILI9881C_COMMAND_INSTR(0x20, 0x02),
ILI9881C_COMMAND_INSTR(0x21, 0x09),
@@ -865,12 +865,12 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x27, 0x00),
ILI9881C_COMMAND_INSTR(0x28, 0x55),
ILI9881C_COMMAND_INSTR(0x29, 0x03),
- ILI9881C_COMMAND_INSTR(0x2A, 0x00),
- ILI9881C_COMMAND_INSTR(0x2B, 0x00),
- ILI9881C_COMMAND_INSTR(0x2C, 0x00),
- ILI9881C_COMMAND_INSTR(0x2D, 0x00),
- ILI9881C_COMMAND_INSTR(0x2E, 0x00),
- ILI9881C_COMMAND_INSTR(0x2F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
ILI9881C_COMMAND_INSTR(0x30, 0x00),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
@@ -880,54 +880,54 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x35, 0x05),
ILI9881C_COMMAND_INSTR(0x36, 0x05),
ILI9881C_COMMAND_INSTR(0x37, 0x00),
- ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
ILI9881C_COMMAND_INSTR(0x39, 0x35),
- ILI9881C_COMMAND_INSTR(0x3A, 0x00),
- ILI9881C_COMMAND_INSTR(0x3B, 0x40),
- ILI9881C_COMMAND_INSTR(0x3C, 0x00),
- ILI9881C_COMMAND_INSTR(0x3D, 0x00),
- ILI9881C_COMMAND_INSTR(0x3E, 0x00),
- ILI9881C_COMMAND_INSTR(0x3F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
ILI9881C_COMMAND_INSTR(0x40, 0x00),
ILI9881C_COMMAND_INSTR(0x41, 0x88),
ILI9881C_COMMAND_INSTR(0x42, 0x00),
ILI9881C_COMMAND_INSTR(0x43, 0x00),
- ILI9881C_COMMAND_INSTR(0x44, 0x1F),
+ ILI9881C_COMMAND_INSTR(0x44, 0x1f),
ILI9881C_COMMAND_INSTR(0x50, 0x01),
ILI9881C_COMMAND_INSTR(0x51, 0x23),
ILI9881C_COMMAND_INSTR(0x52, 0x45),
ILI9881C_COMMAND_INSTR(0x53, 0x67),
ILI9881C_COMMAND_INSTR(0x54, 0x89),
- ILI9881C_COMMAND_INSTR(0x55, 0xaB),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
ILI9881C_COMMAND_INSTR(0x56, 0x01),
ILI9881C_COMMAND_INSTR(0x57, 0x23),
ILI9881C_COMMAND_INSTR(0x58, 0x45),
ILI9881C_COMMAND_INSTR(0x59, 0x67),
- ILI9881C_COMMAND_INSTR(0x5A, 0x89),
- ILI9881C_COMMAND_INSTR(0x5B, 0xAB),
- ILI9881C_COMMAND_INSTR(0x5C, 0xCD),
- ILI9881C_COMMAND_INSTR(0x5D, 0xEF),
- ILI9881C_COMMAND_INSTR(0x5E, 0x03),
- ILI9881C_COMMAND_INSTR(0x5F, 0x14),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x03),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x14),
ILI9881C_COMMAND_INSTR(0x60, 0x15),
- ILI9881C_COMMAND_INSTR(0x61, 0x0C),
- ILI9881C_COMMAND_INSTR(0x62, 0x0D),
- ILI9881C_COMMAND_INSTR(0x63, 0x0E),
- ILI9881C_COMMAND_INSTR(0x64, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0f),
ILI9881C_COMMAND_INSTR(0x65, 0x10),
ILI9881C_COMMAND_INSTR(0x66, 0x11),
ILI9881C_COMMAND_INSTR(0x67, 0x08),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
- ILI9881C_COMMAND_INSTR(0x69, 0x0A),
- ILI9881C_COMMAND_INSTR(0x6A, 0x02),
- ILI9881C_COMMAND_INSTR(0x6B, 0x02),
- ILI9881C_COMMAND_INSTR(0x6C, 0x02),
- ILI9881C_COMMAND_INSTR(0x6D, 0x02),
- ILI9881C_COMMAND_INSTR(0x6E, 0x02),
- ILI9881C_COMMAND_INSTR(0x6F, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
ILI9881C_COMMAND_INSTR(0x70, 0x02),
ILI9881C_COMMAND_INSTR(0x71, 0x02),
@@ -936,15 +936,15 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x74, 0x02),
ILI9881C_COMMAND_INSTR(0x75, 0x14),
ILI9881C_COMMAND_INSTR(0x76, 0x15),
- ILI9881C_COMMAND_INSTR(0x77, 0x0F),
- ILI9881C_COMMAND_INSTR(0x78, 0x0E),
- ILI9881C_COMMAND_INSTR(0x79, 0x0D),
- ILI9881C_COMMAND_INSTR(0x7A, 0x0C),
- ILI9881C_COMMAND_INSTR(0x7B, 0x11),
- ILI9881C_COMMAND_INSTR(0x7C, 0x10),
- ILI9881C_COMMAND_INSTR(0x7D, 0x06),
- ILI9881C_COMMAND_INSTR(0x7E, 0x02),
- ILI9881C_COMMAND_INSTR(0x7F, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x11),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x10),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x0a),
ILI9881C_COMMAND_INSTR(0x80, 0x02),
ILI9881C_COMMAND_INSTR(0x81, 0x02),
@@ -956,74 +956,74 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x08),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
ILI9881C_COMMAND_INSTR(0x00, 0x80),
ILI9881C_COMMAND_INSTR(0x70, 0x00),
ILI9881C_COMMAND_INSTR(0x71, 0x00),
- ILI9881C_COMMAND_INSTR(0x66, 0xFE),
+ ILI9881C_COMMAND_INSTR(0x66, 0xfe),
ILI9881C_COMMAND_INSTR(0x82, 0x15),
ILI9881C_COMMAND_INSTR(0x84, 0x15),
ILI9881C_COMMAND_INSTR(0x85, 0x15),
ILI9881C_COMMAND_INSTR(0x3a, 0x24),
- ILI9881C_COMMAND_INSTR(0x32, 0xAC),
- ILI9881C_COMMAND_INSTR(0x8C, 0x80),
- ILI9881C_COMMAND_INSTR(0x3C, 0xF5),
+ ILI9881C_COMMAND_INSTR(0x32, 0xac),
+ ILI9881C_COMMAND_INSTR(0x8c, 0x80),
+ ILI9881C_COMMAND_INSTR(0x3c, 0xf5),
ILI9881C_COMMAND_INSTR(0x88, 0x33),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x53, 0x78),
- ILI9881C_COMMAND_INSTR(0x50, 0x5B),
- ILI9881C_COMMAND_INSTR(0x51, 0x5B),
+ ILI9881C_COMMAND_INSTR(0x50, 0x5b),
+ ILI9881C_COMMAND_INSTR(0x51, 0x5b),
ILI9881C_COMMAND_INSTR(0x60, 0x20),
ILI9881C_COMMAND_INSTR(0x61, 0x00),
- ILI9881C_COMMAND_INSTR(0x62, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
ILI9881C_COMMAND_INSTR(0x63, 0x00),
- ILI9881C_COMMAND_INSTR(0xA0, 0x00),
- ILI9881C_COMMAND_INSTR(0xA1, 0x10),
- ILI9881C_COMMAND_INSTR(0xA2, 0x1C),
- ILI9881C_COMMAND_INSTR(0xA3, 0x13),
- ILI9881C_COMMAND_INSTR(0xA4, 0x15),
- ILI9881C_COMMAND_INSTR(0xA5, 0x26),
- ILI9881C_COMMAND_INSTR(0xA6, 0x1A),
- ILI9881C_COMMAND_INSTR(0xA7, 0x1D),
- ILI9881C_COMMAND_INSTR(0xA8, 0x67),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
- ILI9881C_COMMAND_INSTR(0xAA, 0x29),
- ILI9881C_COMMAND_INSTR(0xAB, 0x5B),
- ILI9881C_COMMAND_INSTR(0xAC, 0x26),
- ILI9881C_COMMAND_INSTR(0xAD, 0x28),
- ILI9881C_COMMAND_INSTR(0xAE, 0x5C),
- ILI9881C_COMMAND_INSTR(0xAF, 0x30),
- ILI9881C_COMMAND_INSTR(0xB0, 0x31),
- ILI9881C_COMMAND_INSTR(0xB1, 0x2E),
- ILI9881C_COMMAND_INSTR(0xB2, 0x32),
- ILI9881C_COMMAND_INSTR(0xB3, 0x00),
-
- ILI9881C_COMMAND_INSTR(0xC0, 0x00),
- ILI9881C_COMMAND_INSTR(0xC1, 0x10),
- ILI9881C_COMMAND_INSTR(0xC2, 0x1C),
- ILI9881C_COMMAND_INSTR(0xC3, 0x13),
- ILI9881C_COMMAND_INSTR(0xC4, 0x15),
- ILI9881C_COMMAND_INSTR(0xC5, 0x26),
- ILI9881C_COMMAND_INSTR(0xC6, 0x1A),
- ILI9881C_COMMAND_INSTR(0xC7, 0x1D),
- ILI9881C_COMMAND_INSTR(0xC8, 0x67),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
- ILI9881C_COMMAND_INSTR(0xCA, 0x29),
- ILI9881C_COMMAND_INSTR(0xCB, 0x5B),
- ILI9881C_COMMAND_INSTR(0xCC, 0x26),
- ILI9881C_COMMAND_INSTR(0xCD, 0x28),
- ILI9881C_COMMAND_INSTR(0xCE, 0x5C),
- ILI9881C_COMMAND_INSTR(0xCF, 0x30),
- ILI9881C_COMMAND_INSTR(0xD0, 0x31),
- ILI9881C_COMMAND_INSTR(0xD1, 0x2E),
- ILI9881C_COMMAND_INSTR(0xD2, 0x32),
- ILI9881C_COMMAND_INSTR(0xD3, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x10),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x13),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x26),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x67),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29),
+ ILI9881C_COMMAND_INSTR(0xab, 0x5b),
+ ILI9881C_COMMAND_INSTR(0xac, 0x26),
+ ILI9881C_COMMAND_INSTR(0xad, 0x28),
+ ILI9881C_COMMAND_INSTR(0xae, 0x5c),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x30),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x31),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x32),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0xc0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x10),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x13),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x26),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x67),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xca, 0x29),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x5b),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x26),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x28),
+ ILI9881C_COMMAND_INSTR(0xce, 0x5c),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x30),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x31),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x32),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x00),
ILI9881C_SWITCH_PAGE_INSTR(0),
};
@@ -1032,10 +1032,10 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0x01, 0x00),
ILI9881C_COMMAND_INSTR(0x02, 0x00),
ILI9881C_COMMAND_INSTR(0x03, 0x73),
- ILI9881C_COMMAND_INSTR(0x04, 0xD3),
+ ILI9881C_COMMAND_INSTR(0x04, 0xd3),
ILI9881C_COMMAND_INSTR(0x05, 0x00),
- ILI9881C_COMMAND_INSTR(0x06, 0x0A),
- ILI9881C_COMMAND_INSTR(0x07, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x07, 0x0e),
ILI9881C_COMMAND_INSTR(0x08, 0x00),
ILI9881C_COMMAND_INSTR(0x09, 0x01),
ILI9881C_COMMAND_INSTR(0x0a, 0x01),
@@ -1117,10 +1117,10 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0x5f, 0x02),
ILI9881C_COMMAND_INSTR(0x60, 0x00),
ILI9881C_COMMAND_INSTR(0x61, 0x01),
- ILI9881C_COMMAND_INSTR(0x62, 0x0D),
- ILI9881C_COMMAND_INSTR(0x63, 0x0C),
- ILI9881C_COMMAND_INSTR(0x64, 0x0F),
- ILI9881C_COMMAND_INSTR(0x65, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0e),
ILI9881C_COMMAND_INSTR(0x66, 0x06),
ILI9881C_COMMAND_INSTR(0x67, 0x07),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
@@ -1139,10 +1139,10 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0x75, 0x02),
ILI9881C_COMMAND_INSTR(0x76, 0x00),
ILI9881C_COMMAND_INSTR(0x77, 0x01),
- ILI9881C_COMMAND_INSTR(0x78, 0x0D),
- ILI9881C_COMMAND_INSTR(0x79, 0x0C),
- ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
- ILI9881C_COMMAND_INSTR(0x7b, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0e),
ILI9881C_COMMAND_INSTR(0x7c, 0x06),
ILI9881C_COMMAND_INSTR(0x7d, 0x07),
ILI9881C_COMMAND_INSTR(0x7e, 0x02),
@@ -1157,7 +1157,7 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
ILI9881C_COMMAND_INSTR(0x6c, 0x15),
@@ -1170,60 +1170,248 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
- ILI9881C_COMMAND_INSTR(0x31, 0x0B),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x31, 0x0b),
ILI9881C_COMMAND_INSTR(0x50, 0xa5),
ILI9881C_COMMAND_INSTR(0x51, 0xa0),
ILI9881C_COMMAND_INSTR(0x53, 0x70),
- ILI9881C_COMMAND_INSTR(0x55, 0x7A),
+ ILI9881C_COMMAND_INSTR(0x55, 0x7a),
ILI9881C_COMMAND_INSTR(0x60, 0x14),
- ILI9881C_COMMAND_INSTR(0xA0, 0x00),
- ILI9881C_COMMAND_INSTR(0xA1, 0x53),
- ILI9881C_COMMAND_INSTR(0xA2, 0x50),
- ILI9881C_COMMAND_INSTR(0xA3, 0x20),
- ILI9881C_COMMAND_INSTR(0xA4, 0x27),
- ILI9881C_COMMAND_INSTR(0xA5, 0x33),
- ILI9881C_COMMAND_INSTR(0xA6, 0x25),
- ILI9881C_COMMAND_INSTR(0xA7, 0x25),
- ILI9881C_COMMAND_INSTR(0xA8, 0xD4),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1A),
- ILI9881C_COMMAND_INSTR(0xAA, 0x2B),
- ILI9881C_COMMAND_INSTR(0xAB, 0xB5),
- ILI9881C_COMMAND_INSTR(0xAC, 0x19),
- ILI9881C_COMMAND_INSTR(0xAD, 0x18),
- ILI9881C_COMMAND_INSTR(0xAE, 0x53),
- ILI9881C_COMMAND_INSTR(0xAF, 0x1A),
- ILI9881C_COMMAND_INSTR(0xB0, 0x25),
- ILI9881C_COMMAND_INSTR(0xB1, 0x62),
- ILI9881C_COMMAND_INSTR(0xB2, 0x6A),
- ILI9881C_COMMAND_INSTR(0xB3, 0x31),
-
- ILI9881C_COMMAND_INSTR(0xC0, 0x00),
- ILI9881C_COMMAND_INSTR(0xC1, 0x53),
- ILI9881C_COMMAND_INSTR(0xC2, 0x50),
- ILI9881C_COMMAND_INSTR(0xC3, 0x20),
- ILI9881C_COMMAND_INSTR(0xC4, 0x27),
- ILI9881C_COMMAND_INSTR(0xC5, 0x33),
- ILI9881C_COMMAND_INSTR(0xC6, 0x25),
- ILI9881C_COMMAND_INSTR(0xC7, 0x25),
- ILI9881C_COMMAND_INSTR(0xC8, 0xD4),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1A),
- ILI9881C_COMMAND_INSTR(0xCA, 0x2B),
- ILI9881C_COMMAND_INSTR(0xCB, 0xB5),
- ILI9881C_COMMAND_INSTR(0xCC, 0x19),
- ILI9881C_COMMAND_INSTR(0xCD, 0x18),
- ILI9881C_COMMAND_INSTR(0xCE, 0x53),
- ILI9881C_COMMAND_INSTR(0xCF, 0x1A),
- ILI9881C_COMMAND_INSTR(0xD0, 0x25),
- ILI9881C_COMMAND_INSTR(0xD1, 0x62),
- ILI9881C_COMMAND_INSTR(0xD2, 0x6A),
- ILI9881C_COMMAND_INSTR(0xD3, 0x31),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xa8, 0xd4),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xab, 0xb5),
+ ILI9881C_COMMAND_INSTR(0xac, 0x19),
+ ILI9881C_COMMAND_INSTR(0xad, 0x18),
+ ILI9881C_COMMAND_INSTR(0xae, 0x53),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x6a),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x31),
+
+ ILI9881C_COMMAND_INSTR(0xc0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xc8, 0xd4),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xca, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xcb, 0xb5),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x19),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x18),
+ ILI9881C_COMMAND_INSTR(0xce, 0x53),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x6a),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x31),
ILI9881C_SWITCH_PAGE_INSTR(0),
ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c),
ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
};
+static const struct ili9881c_instr rpi_5inch_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x73),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x06),
+ ILI9881C_COMMAND_INSTR(0x07, 0x02),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x10, 0x01),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x01),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x04),
+ ILI9881C_COMMAND_INSTR(0x21, 0x03),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x03),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x03),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x00),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x10),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x09),
+ ILI9881C_COMMAND_INSTR(0x60, 0x08),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x65, 0x02),
+ ILI9881C_COMMAND_INSTR(0x66, 0x02),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x06),
+ ILI9881C_COMMAND_INSTR(0x72, 0x07),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x06),
+ ILI9881C_COMMAND_INSTR(0x76, 0x07),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x80, 0x02),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x02),
+ ILI9881C_COMMAND_INSTR(0x83, 0x02),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x09),
+ ILI9881C_COMMAND_INSTR(0x88, 0x08),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x57),
+ ILI9881C_COMMAND_INSTR(0x3a, 0xa4),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x1a),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x35),
+ ILI9881C_COMMAND_INSTR(0x55, 0x50),
+ ILI9881C_COMMAND_INSTR(0x50, 0xaf),
+ ILI9881C_COMMAND_INSTR(0x51, 0xaf),
+ ILI9881C_COMMAND_INSTR(0x60, 0x14),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x2c),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x14),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x19),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x22),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x23),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x97),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29),
+ ILI9881C_COMMAND_INSTR(0xab, 0x7b),
+ ILI9881C_COMMAND_INSTR(0xac, 0x18),
+ ILI9881C_COMMAND_INSTR(0xad, 0x17),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x52),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x63),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x2c),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x14),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x19),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x22),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x23),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x97),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xca, 0x29),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x7b),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x18),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x17),
+ ILI9881C_COMMAND_INSTR(0xce, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x52),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x63),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x39),
+};
+
static const struct ili9881c_instr rpi_7inch_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
@@ -1352,22 +1540,22 @@ static const struct ili9881c_instr rpi_7inch_init[] = {
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
- ILI9881C_COMMAND_INSTR(0x6C, 0x15),
- ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
- ILI9881C_COMMAND_INSTR(0x6F, 0x33),
- ILI9881C_COMMAND_INSTR(0x3B, 0x98),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x98),
ILI9881C_COMMAND_INSTR(0x3a, 0x94),
- ILI9881C_COMMAND_INSTR(0x8D, 0x14),
- ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x14),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
ILI9881C_COMMAND_INSTR(0x26, 0x76),
- ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
- ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_COMMAND_INSTR(0xb5, 0x06),
ILI9881C_COMMAND_INSTR(0x38, 0x01),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x53, 0x7d),
ILI9881C_COMMAND_INSTR(0x55, 0x8f),
@@ -1375,46 +1563,46 @@ static const struct ili9881c_instr rpi_7inch_init[] = {
ILI9881C_COMMAND_INSTR(0x50, 0x96),
ILI9881C_COMMAND_INSTR(0x51, 0x96),
ILI9881C_COMMAND_INSTR(0x60, 0x23),
- ILI9881C_COMMAND_INSTR(0xA0, 0x08),
- ILI9881C_COMMAND_INSTR(0xA1, 0x1d),
- ILI9881C_COMMAND_INSTR(0xA2, 0x2a),
- ILI9881C_COMMAND_INSTR(0xA3, 0x10),
- ILI9881C_COMMAND_INSTR(0xA4, 0x15),
- ILI9881C_COMMAND_INSTR(0xA5, 0x28),
- ILI9881C_COMMAND_INSTR(0xA6, 0x1c),
- ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
- ILI9881C_COMMAND_INSTR(0xA8, 0x7e),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1d),
- ILI9881C_COMMAND_INSTR(0xAA, 0x29),
- ILI9881C_COMMAND_INSTR(0xAB, 0x6b),
- ILI9881C_COMMAND_INSTR(0xAC, 0x1a),
- ILI9881C_COMMAND_INSTR(0xAD, 0x18),
- ILI9881C_COMMAND_INSTR(0xAE, 0x4b),
- ILI9881C_COMMAND_INSTR(0xAF, 0x20),
- ILI9881C_COMMAND_INSTR(0xB0, 0x27),
- ILI9881C_COMMAND_INSTR(0xB1, 0x50),
- ILI9881C_COMMAND_INSTR(0xB2, 0x64),
- ILI9881C_COMMAND_INSTR(0xB3, 0x39),
- ILI9881C_COMMAND_INSTR(0xC0, 0x08),
- ILI9881C_COMMAND_INSTR(0xC1, 0x1d),
- ILI9881C_COMMAND_INSTR(0xC2, 0x2a),
- ILI9881C_COMMAND_INSTR(0xC3, 0x10),
- ILI9881C_COMMAND_INSTR(0xC4, 0x15),
- ILI9881C_COMMAND_INSTR(0xC5, 0x28),
- ILI9881C_COMMAND_INSTR(0xC6, 0x1c),
- ILI9881C_COMMAND_INSTR(0xC7, 0x1d),
- ILI9881C_COMMAND_INSTR(0xC8, 0x7e),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1d),
- ILI9881C_COMMAND_INSTR(0xCA, 0x29),
- ILI9881C_COMMAND_INSTR(0xCB, 0x6b),
- ILI9881C_COMMAND_INSTR(0xCC, 0x1a),
- ILI9881C_COMMAND_INSTR(0xCD, 0x18),
- ILI9881C_COMMAND_INSTR(0xCE, 0x4b),
- ILI9881C_COMMAND_INSTR(0xCF, 0x20),
- ILI9881C_COMMAND_INSTR(0xD0, 0x27),
- ILI9881C_COMMAND_INSTR(0xD1, 0x50),
- ILI9881C_COMMAND_INSTR(0xD2, 0x64),
- ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29),
+ ILI9881C_COMMAND_INSTR(0xab, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xac, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xad, 0x18),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x20),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xca, 0x29),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x18),
+ ILI9881C_COMMAND_INSTR(0xce, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x20),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x39),
};
static const struct ili9881c_instr bsd1218_a101kl68_init[] = {
@@ -1806,6 +1994,23 @@ static const struct drm_display_mode am8001280g_default_mode = {
.height_mm = 151,
};
+static const struct drm_display_mode rpi_5inch_default_mode = {
+ .clock = 83333,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 110,
+ .hsync_end = 720 + 110 + 12,
+ .htotal = 720 + 110 + 12 + 95,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 100,
+ .vsync_end = 1280 + 100 + 2,
+ .vtotal = 1280 + 100 + 2 + 100,
+
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
static const struct drm_display_mode rpi_7inch_default_mode = {
.clock = 83330,
@@ -2000,6 +2205,14 @@ static const struct ili9881c_desc am8001280g_desc = {
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
};
+static const struct ili9881c_desc rpi_5inch_desc = {
+ .init = rpi_5inch_init,
+ .init_length = ARRAY_SIZE(rpi_5inch_init),
+ .mode = &rpi_5inch_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM,
+ .lanes = 2,
+};
+
static const struct ili9881c_desc rpi_7inch_desc = {
.init = rpi_7inch_init,
.init_length = ARRAY_SIZE(rpi_7inch_init),
@@ -2025,6 +2238,7 @@ static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
+	{ .compatible = "raspberrypi,dsi-5inch", .data = &rpi_5inch_desc },
{ .compatible = "raspberrypi,dsi-7inch", &rpi_7inch_desc },
{ }
};
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c
new file mode 100644
index 000000000000..8c00fde1c4a9
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 XiaoMi, Inc.
+ * Copyright (c) 2024 Svyatoslav Ryhel <clamor95@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+static const struct regulator_bulk_data sharp_supplies[] = {
+ { .supply = "avdd" }, { .supply = "vddio" },
+ { .supply = "vsp" }, { .supply = "vsn" },
+};
+
+struct sharp_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi[2];
+
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+
+ const struct drm_display_mode *mode;
+};
+
+static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct sharp_panel, panel);
+}
+
+static void sharp_panel_reset(struct sharp_panel *sharp)
+{
+ gpiod_set_value_cansleep(sharp->reset_gpio, 1);
+ usleep_range(2000, 3000);
+ gpiod_set_value_cansleep(sharp->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int sharp_panel_prepare(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+ struct device *dev = panel->dev;
+ struct mipi_dsi_device *dsi0 = sharp->dsi[0];
+ struct mipi_dsi_device *dsi1 = sharp->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(sharp_supplies), sharp->supplies);
+ if (ret) {
+ dev_err(dev, "error enabling regulators (%d)\n", ret);
+ return ret;
+ }
+
+ msleep(24);
+
+ if (sharp->reset_gpio)
+ sharp_panel_reset(sharp);
+
+ msleep(32);
+
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1,
+ MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1,
+ MIPI_DCS_WRITE_POWER_SAVE, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1,
+ MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
+
+	mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_SET_DISPLAY_ON);
+
+	/* Propagate any accumulated DCS error and undo the regulator enable on failure */
+	if (dsi_ctx.accum_err)
+		regulator_bulk_disable(ARRAY_SIZE(sharp_supplies), sharp->supplies);
+
+	return dsi_ctx.accum_err;
+}
+
+static int sharp_panel_unprepare(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+ struct mipi_dsi_device *dsi0 = sharp->dsi[0];
+ struct mipi_dsi_device *dsi1 = sharp->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
+
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_ENTER_SLEEP_MODE);
+ mipi_dsi_msleep(&dsi_ctx, 150);
+
+ if (sharp->reset_gpio)
+ gpiod_set_value_cansleep(sharp->reset_gpio, 1);
+
+ return regulator_bulk_disable(ARRAY_SIZE(sharp_supplies), sharp->supplies);
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = (1536 + 136 + 28 + 28) * (2048 + 14 + 8 + 2) * 60 / 1000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 136,
+ .hsync_end = 1536 + 136 + 28,
+ .htotal = 1536 + 136 + 28 + 28,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 14,
+ .vsync_end = 2048 + 14 + 8,
+ .vtotal = 2048 + 14 + 8 + 2,
+ .width_mm = 120,
+ .height_mm = 160,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int sharp_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &default_mode);
+}
+
+static const struct drm_panel_funcs sharp_panel_funcs = {
+ .unprepare = sharp_panel_unprepare,
+ .prepare = sharp_panel_prepare,
+ .get_modes = sharp_panel_get_modes,
+};
+
+static int sharp_panel_probe(struct mipi_dsi_device *dsi)
+{
+ const struct mipi_dsi_device_info info = { "sharp-link1", 0, NULL };
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi_r;
+ struct mipi_dsi_host *dsi_r_host;
+ struct sharp_panel *sharp;
+ int i, ret;
+
+ sharp = devm_drm_panel_alloc(dev, struct sharp_panel, panel,
+ &sharp_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(sharp))
+ return PTR_ERR(sharp);
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(sharp_supplies),
+ sharp_supplies, &sharp->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get supplies\n");
+
+ sharp->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(sharp->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(sharp->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ /* Panel is always connected to two DSI hosts, DSI0 is left, DSI1 is right */
+ dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (!dsi_r)
+ return dev_err_probe(dev, -ENODEV, "failed to find second DSI host node\n");
+
+ dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ of_node_put(dsi_r);
+ if (!dsi_r_host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
+
+ sharp->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi_r_host, &info);
+ if (IS_ERR(sharp->dsi[1]))
+ return dev_err_probe(dev, PTR_ERR(sharp->dsi[1]),
+ "second link registration failed\n");
+
+ sharp->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, sharp);
+
+ ret = drm_panel_of_backlight(&sharp->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&sharp->panel);
+
+ for (i = 0; i < ARRAY_SIZE(sharp->dsi); i++) {
+ if (!sharp->dsi[i])
+ continue;
+
+ sharp->dsi[i]->lanes = 4;
+ sharp->dsi[i]->format = MIPI_DSI_FMT_RGB888;
+ sharp->dsi[i]->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM;
+
+ ret = devm_mipi_dsi_attach(dev, sharp->dsi[i]);
+ if (ret < 0) {
+ drm_panel_remove(&sharp->panel);
+ return dev_err_probe(dev, ret, "failed to attach to DSI%d\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static void sharp_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&sharp->panel);
+}
+
+static const struct of_device_id sharp_of_match[] = {
+ { .compatible = "sharp,lq079l1sx01" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sharp_of_match);
+
+static struct mipi_dsi_driver sharp_panel_driver = {
+ .driver = {
+ .name = "panel-sharp-lq079l1sx01",
+ .of_match_table = sharp_of_match,
+ },
+ .probe = sharp_panel_probe,
+ .remove = sharp_panel_remove,
+};
+module_mipi_dsi_driver(sharp_panel_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Sharp LQ079L1SX01 panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 0019de93be1b..da6b71b70a46 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2889,6 +2889,38 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
+static const struct display_timing jutouch_jt101tm023_timing = {
+ .pixelclock = { 66300000, 72400000, 78900000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 12, 72, 132 },
+ .hback_porch = { 88, 88, 88 },
+ .hsync_len = { 10, 10, 48 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 1, 15, 49 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 5, 6, 13 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc jutouch_jt101tm023 = {
+ .timings = &jutouch_jt101tm023_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing koe_tx14d24vm1bpa_timing = {
.pixelclock = { 5580000, 5850000, 6200000 },
.hactive = { 320, 320, 320 },
@@ -5209,6 +5241,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
+ .compatible = "jutouch,jt101tm023",
+ .data = &jutouch_jt101tm023,
+ }, {
.compatible = "koe,tx14d24vm1bpa",
.data = &koe_tx14d24vm1bpa,
}, {
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 909c280eab1f..e5e688cf98fd 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
+#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/property.h>
@@ -20,6 +21,8 @@ struct visionox_rm69299_panel_desc {
const struct drm_display_mode *mode;
const u8 *init_seq;
unsigned int init_seq_len;
+ int max_brightness;
+ int initial_brightness;
};
struct visionox_rm69299 {
@@ -192,7 +195,7 @@ static int visionox_rm69299_unprepare(struct drm_panel *panel)
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
- ctx->dsi->mode_flags = 0;
+ ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
@@ -247,7 +250,7 @@ static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
};
static const struct drm_display_mode visionox_rm69299_1080x2160_60hz = {
- .clock = 158695,
+ .clock = (2160 + 8 + 4 + 4) * (1080 + 26 + 2 + 36) * 60 / 1000,
.hdisplay = 1080,
.hsync_start = 1080 + 26,
.hsync_end = 1080 + 26 + 2,
@@ -285,6 +288,63 @@ static const struct drm_panel_funcs visionox_rm69299_drm_funcs = {
.get_modes = visionox_rm69299_get_modes,
};
+static int visionox_rm69299_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness;
+}
+
+static int visionox_rm69299_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops visionox_rm69299_bl_ops = {
+ .update_status = visionox_rm69299_bl_update_status,
+ .get_brightness = visionox_rm69299_bl_get_brightness,
+};
+
+static struct backlight_device *
+visionox_rm69299_create_backlight(struct visionox_rm69299 *ctx)
+{
+ struct device *dev = &ctx->dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = ctx->desc->initial_brightness,
+ .max_brightness = ctx->desc->max_brightness,
+ };
+
+ if (!ctx->desc->max_brightness)
+ return NULL;
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, ctx->dsi,
+ &visionox_rm69299_bl_ops,
+ &props);
+}
+
static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
{
struct device *dev = &dsi->dev;
@@ -316,6 +376,11 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
+ ctx->panel.backlight = visionox_rm69299_create_backlight(ctx);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
drm_panel_add(&ctx->panel);
dsi->lanes = 4;
@@ -353,6 +418,8 @@ const struct visionox_rm69299_panel_desc visionox_rm69299_shift_desc = {
.mode = &visionox_rm69299_1080x2160_60hz,
.init_seq = (const u8 *)visionox_rm69299_1080x2160_60hz_init_seq,
.init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2160_60hz_init_seq),
+ .max_brightness = 255,
+ .initial_brightness = 50,
};
static const struct of_device_id visionox_rm69299_of_match[] = {
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 5d0dce10336b..ac05df2a54fe 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -74,7 +74,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
- dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ dev_dbg(pfdev->base.dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
status->busy_time, status->total_time,
status->busy_time / (status->total_time / 100),
status->current_frequency / 1000 / 1000);
@@ -119,7 +119,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
int ret;
struct dev_pm_opp *opp;
unsigned long cur_freq;
- struct device *dev = &pfdev->pdev->dev;
+ struct device *dev = pfdev->base.dev;
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 04bec27449cb..c61b97af120c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -20,9 +20,9 @@
static int panfrost_reset_init(struct panfrost_device *pfdev)
{
- pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev);
+ pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->base.dev);
if (IS_ERR(pfdev->rstc)) {
- dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
+ dev_err(pfdev->base.dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
return PTR_ERR(pfdev->rstc);
}
@@ -39,22 +39,22 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
int err;
unsigned long rate;
- pfdev->clock = devm_clk_get(pfdev->dev, NULL);
+ pfdev->clock = devm_clk_get(pfdev->base.dev, NULL);
if (IS_ERR(pfdev->clock)) {
- dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock));
+ dev_err(pfdev->base.dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock));
return PTR_ERR(pfdev->clock);
}
rate = clk_get_rate(pfdev->clock);
- dev_info(pfdev->dev, "clock rate = %lu\n", rate);
+ dev_info(pfdev->base.dev, "clock rate = %lu\n", rate);
err = clk_prepare_enable(pfdev->clock);
if (err)
return err;
- pfdev->bus_clock = devm_clk_get_optional(pfdev->dev, "bus");
+ pfdev->bus_clock = devm_clk_get_optional(pfdev->base.dev, "bus");
if (IS_ERR(pfdev->bus_clock)) {
- dev_err(pfdev->dev, "get bus_clock failed %ld\n",
+ dev_err(pfdev->base.dev, "get bus_clock failed %ld\n",
PTR_ERR(pfdev->bus_clock));
err = PTR_ERR(pfdev->bus_clock);
goto disable_clock;
@@ -62,7 +62,7 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
if (pfdev->bus_clock) {
rate = clk_get_rate(pfdev->bus_clock);
- dev_info(pfdev->dev, "bus_clock rate = %lu\n", rate);
+ dev_info(pfdev->base.dev, "bus_clock rate = %lu\n", rate);
err = clk_prepare_enable(pfdev->bus_clock);
if (err)
@@ -87,7 +87,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
int ret, i;
- pfdev->regulators = devm_kcalloc(pfdev->dev, pfdev->comp->num_supplies,
+ pfdev->regulators = devm_kcalloc(pfdev->base.dev, pfdev->comp->num_supplies,
sizeof(*pfdev->regulators),
GFP_KERNEL);
if (!pfdev->regulators)
@@ -96,12 +96,12 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
for (i = 0; i < pfdev->comp->num_supplies; i++)
pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
- ret = devm_regulator_bulk_get(pfdev->dev,
+ ret = devm_regulator_bulk_get(pfdev->base.dev,
pfdev->comp->num_supplies,
pfdev->regulators);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
- dev_err(pfdev->dev, "failed to get regulators: %d\n",
+ dev_err(pfdev->base.dev, "failed to get regulators: %d\n",
ret);
return ret;
}
@@ -109,7 +109,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
ret = regulator_bulk_enable(pfdev->comp->num_supplies,
pfdev->regulators);
if (ret < 0) {
- dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret);
+ dev_err(pfdev->base.dev, "failed to enable regulators: %d\n", ret);
return ret;
}
@@ -144,7 +144,7 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
int err;
int i, num_domains;
- num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
+ num_domains = of_count_phandle_with_args(pfdev->base.dev->of_node,
"power-domains",
"#power-domain-cells");
@@ -156,7 +156,7 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
return 0;
if (num_domains != pfdev->comp->num_pm_domains) {
- dev_err(pfdev->dev,
+ dev_err(pfdev->base.dev,
"Incorrect number of power domains: %d provided, %d needed\n",
num_domains, pfdev->comp->num_pm_domains);
return -EINVAL;
@@ -168,20 +168,21 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
for (i = 0; i < num_domains; i++) {
pfdev->pm_domain_devs[i] =
- dev_pm_domain_attach_by_name(pfdev->dev,
- pfdev->comp->pm_domain_names[i]);
+ dev_pm_domain_attach_by_name(pfdev->base.dev,
+ pfdev->comp->pm_domain_names[i]);
if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) {
err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA;
pfdev->pm_domain_devs[i] = NULL;
- dev_err(pfdev->dev,
+ dev_err(pfdev->base.dev,
"failed to get pm-domain %s(%d): %d\n",
pfdev->comp->pm_domain_names[i], i, err);
goto err;
}
- pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
- pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
- DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
+ pfdev->pm_domain_links[i] =
+ device_link_add(pfdev->base.dev,
+ pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
if (!pfdev->pm_domain_links[i]) {
dev_err(pfdev->pm_domain_devs[i],
"adding device link failed!\n");
@@ -220,20 +221,20 @@ int panfrost_device_init(struct panfrost_device *pfdev)
err = panfrost_reset_init(pfdev);
if (err) {
- dev_err(pfdev->dev, "reset init failed %d\n", err);
+ dev_err(pfdev->base.dev, "reset init failed %d\n", err);
goto out_pm_domain;
}
err = panfrost_clk_init(pfdev);
if (err) {
- dev_err(pfdev->dev, "clk init failed %d\n", err);
+ dev_err(pfdev->base.dev, "clk init failed %d\n", err);
goto out_reset;
}
err = panfrost_devfreq_init(pfdev);
if (err) {
if (err != -EPROBE_DEFER)
- dev_err(pfdev->dev, "devfreq init failed %d\n", err);
+ dev_err(pfdev->base.dev, "devfreq init failed %d\n", err);
goto out_clk;
}
@@ -244,7 +245,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto out_devfreq;
}
- pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
+ pfdev->iomem = devm_platform_ioremap_resource(to_platform_device(pfdev->base.dev), 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
goto out_regulator;
@@ -258,7 +259,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
if (err)
goto out_gpu;
- err = panfrost_job_init(pfdev);
+ err = panfrost_jm_init(pfdev);
if (err)
goto out_mmu;
@@ -268,7 +269,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
return 0;
out_job:
- panfrost_job_fini(pfdev);
+ panfrost_jm_fini(pfdev);
out_mmu:
panfrost_mmu_fini(pfdev);
out_gpu:
@@ -289,7 +290,7 @@ out_pm_domain:
void panfrost_device_fini(struct panfrost_device *pfdev)
{
panfrost_perfcnt_fini(pfdev);
- panfrost_job_fini(pfdev);
+ panfrost_jm_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
panfrost_devfreq_fini(pfdev);
@@ -399,13 +400,16 @@ bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev,
return false;
}
-void panfrost_device_reset(struct panfrost_device *pfdev)
+void panfrost_device_reset(struct panfrost_device *pfdev, bool enable_job_int)
{
panfrost_gpu_soft_reset(pfdev);
panfrost_gpu_power_on(pfdev);
panfrost_mmu_reset(pfdev);
- panfrost_job_enable_interrupts(pfdev);
+
+ panfrost_jm_reset_interrupts(pfdev);
+ if (enable_job_int)
+ panfrost_jm_enable_interrupts(pfdev);
}
static int panfrost_device_runtime_resume(struct device *dev)
@@ -429,7 +433,7 @@ static int panfrost_device_runtime_resume(struct device *dev)
}
}
- panfrost_device_reset(pfdev);
+ panfrost_device_reset(pfdev, true);
panfrost_devfreq_resume(pfdev);
return 0;
@@ -447,11 +451,11 @@ static int panfrost_device_runtime_suspend(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
- if (!panfrost_job_is_idle(pfdev))
+ if (!panfrost_jm_is_idle(pfdev))
return -EBUSY;
panfrost_devfreq_suspend(pfdev);
- panfrost_job_suspend_irq(pfdev);
+ panfrost_jm_suspend_irq(pfdev);
panfrost_mmu_suspend_irq(pfdev);
panfrost_gpu_suspend_irq(pfdev);
panfrost_gpu_power_off(pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 077525a3ad68..e61c4329fd07 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -10,11 +10,13 @@
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
+#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
#include <drm/gpu_scheduler.h>
#include "panfrost_devfreq.h"
+#include "panfrost_job.h"
struct panfrost_device;
struct panfrost_mmu;
@@ -22,9 +24,12 @@ struct panfrost_job_slot;
struct panfrost_job;
struct panfrost_perfcnt;
-#define NUM_JOB_SLOTS 3
#define MAX_PM_DOMAINS 5
+#define ALL_JS_INT_MASK \
+ (GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | \
+ GENMASK(NUM_JOB_SLOTS - 1, 0))
+
enum panfrost_drv_comp_bits {
PANFROST_COMP_BIT_GPU,
PANFROST_COMP_BIT_JOB,
@@ -123,9 +128,7 @@ struct panfrost_device_debugfs {
};
struct panfrost_device {
- struct device *dev;
- struct drm_device *ddev;
- struct platform_device *pdev;
+ struct drm_device base;
int gpu_irq;
int mmu_irq;
@@ -144,7 +147,6 @@ struct panfrost_device {
DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX);
spinlock_t as_lock;
- unsigned long as_in_use_mask;
unsigned long as_alloc_mask;
unsigned long as_faulty_mask;
struct list_head as_lru_list;
@@ -206,16 +208,22 @@ struct panfrost_engine_usage {
struct panfrost_file_priv {
struct panfrost_device *pfdev;
- struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
+ struct xarray jm_ctxs;
struct panfrost_mmu *mmu;
struct panfrost_engine_usage engine_usage;
};
+static inline bool panfrost_high_prio_allowed(struct drm_file *file)
+{
+ /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
+ return (capable(CAP_SYS_NICE) || drm_is_current_master(file));
+}
+
static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
{
- return ddev->dev_private;
+ return container_of(ddev, struct panfrost_device, base);
}
static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id)
@@ -241,7 +249,7 @@ int panfrost_unstable_ioctl_check(void);
int panfrost_device_init(struct panfrost_device *pfdev);
void panfrost_device_fini(struct panfrost_device *pfdev);
-void panfrost_device_reset(struct panfrost_device *pfdev);
+void panfrost_device_reset(struct panfrost_device *pfdev, bool enable_job_int);
extern const struct dev_pm_ops panfrost_pm_ops;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1ea6c509a5d5..1c3c574cd64a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -36,7 +36,7 @@ static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev,
{
int ret;
- ret = pm_runtime_resume_and_get(pfdev->dev);
+ ret = pm_runtime_resume_and_get(pfdev->base.dev);
if (ret)
return ret;
@@ -44,14 +44,14 @@ static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev,
*arg = panfrost_timestamp_read(pfdev);
panfrost_cycle_counter_put(pfdev);
- pm_runtime_put(pfdev->dev);
+ pm_runtime_put(pfdev->base.dev);
return 0;
}
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
struct drm_panfrost_get_param *param = data;
- struct panfrost_device *pfdev = ddev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(ddev);
int ret;
if (param->pad != 0)
@@ -109,6 +109,14 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
#endif
break;
+ case DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES:
+ param->value = BIT(PANFROST_JM_CTX_PRIORITY_LOW) |
+ BIT(PANFROST_JM_CTX_PRIORITY_MEDIUM);
+
+ if (panfrost_high_prio_allowed(file))
+ param->value |= BIT(PANFROST_JM_CTX_PRIORITY_HIGH);
+ break;
+
default:
return -EINVAL;
}
@@ -275,13 +283,17 @@ fail:
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_file_priv *file_priv = file->driver_priv;
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
+ struct panfrost_jm_ctx *jm_ctx;
struct panfrost_job *job;
int ret = 0, slot;
+ if (args->pad)
+ return -EINVAL;
+
if (!args->jc)
return -EINVAL;
@@ -294,10 +306,16 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
return -ENODEV;
}
+ jm_ctx = panfrost_jm_ctx_from_handle(file, args->jm_ctx_handle);
+ if (!jm_ctx) {
+ ret = -EINVAL;
+ goto out_put_syncout;
+ }
+
job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job) {
ret = -ENOMEM;
- goto out_put_syncout;
+ goto out_put_jm_ctx;
}
kref_init(&job->refcount);
@@ -307,12 +325,13 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
job->mmu = file_priv->mmu;
+ job->ctx = panfrost_jm_ctx_get(jm_ctx);
job->engine_usage = &file_priv->engine_usage;
slot = panfrost_job_get_slot(job);
ret = drm_sched_job_init(&job->base,
- &file_priv->sched_entity[slot],
+ &jm_ctx->slot_entity[slot],
1, NULL, file->client_id);
if (ret)
goto out_put_job;
@@ -338,6 +357,8 @@ out_cleanup_job:
drm_sched_job_cleanup(&job->base);
out_put_job:
panfrost_job_put(job);
+out_put_jm_ctx:
+ panfrost_jm_ctx_put(jm_ctx);
out_put_syncout:
if (sync_out)
drm_syncobj_put(sync_out);
@@ -436,7 +457,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
{
struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_madvise *args = data;
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
int ret = 0;
@@ -536,6 +557,27 @@ err_put_obj:
return ret;
}
+static int panfrost_ioctl_jm_ctx_create(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return panfrost_jm_ctx_create(file, data);
+}
+
+static int panfrost_ioctl_jm_ctx_destroy(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ const struct drm_panfrost_jm_ctx_destroy *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ /* We can't destroy the default context created when the file is opened. */
+ if (!args->handle)
+ return -EINVAL;
+
+ return panfrost_jm_ctx_destroy(file, args->handle);
+}
+
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -548,7 +590,7 @@ static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
int ret;
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_file_priv *panfrost_priv;
panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
@@ -564,7 +606,7 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
goto err_free;
}
- ret = panfrost_job_open(panfrost_priv);
+ ret = panfrost_jm_open(file);
if (ret)
goto err_job;
@@ -583,7 +625,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
struct panfrost_file_priv *panfrost_priv = file->driver_priv;
panfrost_perfcnt_close(file);
- panfrost_job_close(panfrost_priv);
+ panfrost_jm_close(file);
panfrost_mmu_ctx_put(panfrost_priv->mmu);
kfree(panfrost_priv);
@@ -603,6 +645,8 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(JM_CTX_CREATE, jm_ctx_create, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(JM_CTX_DESTROY, jm_ctx_destroy, DRM_RENDER_ALLOW),
};
static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
@@ -624,30 +668,25 @@ static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
* job spent on the GPU.
*/
- static const char * const engine_names[] = {
- "fragment", "vertex-tiler", "compute-only"
- };
-
- BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS);
-
for (i = 0; i < NUM_JOB_SLOTS - 1; i++) {
if (pfdev->profile_mode) {
drm_printf(p, "drm-engine-%s:\t%llu ns\n",
- engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]);
+ panfrost_engine_names[i],
+ panfrost_priv->engine_usage.elapsed_ns[i]);
drm_printf(p, "drm-cycles-%s:\t%llu\n",
- engine_names[i], panfrost_priv->engine_usage.cycles[i]);
+ panfrost_engine_names[i],
+ panfrost_priv->engine_usage.cycles[i]);
}
drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n",
- engine_names[i], pfdev->pfdevfreq.fast_rate);
+ panfrost_engine_names[i], pfdev->pfdevfreq.fast_rate);
drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n",
- engine_names[i], pfdev->pfdevfreq.current_frequency);
+ panfrost_engine_names[i], pfdev->pfdevfreq.current_frequency);
}
}
static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
- struct drm_device *dev = file->minor->dev;
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(file->minor->dev);
panfrost_gpu_show_fdinfo(pfdev, file->driver_priv, p);
@@ -664,16 +703,57 @@ static const struct file_operations panfrost_drm_driver_fops = {
static int panthor_gems_show(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(node->minor->dev);
panfrost_gem_debugfs_print_bos(pfdev, m);
return 0;
}
+static void show_panfrost_jm_ctx(struct panfrost_jm_ctx *jm_ctx, u32 handle,
+ struct seq_file *m)
+{
+ struct drm_device *ddev = ((struct drm_info_node *)m->private)->minor->dev;
+ const char *prio = "UNKNOWN";
+
+ static const char * const prios[] = {
+ [DRM_SCHED_PRIORITY_HIGH] = "HIGH",
+ [DRM_SCHED_PRIORITY_NORMAL] = "NORMAL",
+ [DRM_SCHED_PRIORITY_LOW] = "LOW",
+ };
+
+ if (jm_ctx->slot_entity[0].priority !=
+ jm_ctx->slot_entity[1].priority)
+ drm_warn(ddev, "Slot priorities should be the same in a single context\n");
+
+ if (jm_ctx->slot_entity[0].priority < ARRAY_SIZE(prios))
+ prio = prios[jm_ctx->slot_entity[0].priority];
+
+ seq_printf(m, " JM context %u: priority %s\n", handle, prio);
+}
+
+static int show_file_jm_ctxs(struct panfrost_file_priv *pfile,
+ struct seq_file *m)
+{
+ struct panfrost_jm_ctx *jm_ctx;
+ unsigned long i;
+
+ xa_lock(&pfile->jm_ctxs);
+ xa_for_each(&pfile->jm_ctxs, i, jm_ctx) {
+ jm_ctx = panfrost_jm_ctx_get(jm_ctx);
+ xa_unlock(&pfile->jm_ctxs);
+ show_panfrost_jm_ctx(jm_ctx, i, m);
+ panfrost_jm_ctx_put(jm_ctx);
+ xa_lock(&pfile->jm_ctxs);
+ }
+ xa_unlock(&pfile->jm_ctxs);
+
+ return 0;
+}
+
static struct drm_info_list panthor_debugfs_list[] = {
- {"gems", panthor_gems_show, 0, NULL},
+ {"gems",
+ panthor_gems_show, 0, NULL},
};
static int panthor_gems_debugfs_init(struct drm_minor *minor)
@@ -685,9 +765,64 @@ static int panthor_gems_debugfs_init(struct drm_minor *minor)
return 0;
}
+static int show_each_file(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *ddev = node->minor->dev;
+ int (*show)(struct panfrost_file_priv *, struct seq_file *) =
+ node->info_ent->data;
+ struct drm_file *file;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ddev->filelist_mutex);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(file, &ddev->filelist, lhead) {
+ struct task_struct *task;
+ struct panfrost_file_priv *pfile = file->driver_priv;
+ struct pid *pid;
+
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct that called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+ seq_printf(m, "client_id %8llu pid %8d command %s:\n",
+ file->client_id, pid_nr(pid),
+ task ? task->comm : "<unknown>");
+ rcu_read_unlock();
+
+ ret = show(pfile, m);
+ if (ret < 0)
+ break;
+
+ seq_puts(m, "\n");
+ }
+
+ mutex_unlock(&ddev->filelist_mutex);
+ return ret;
+}
+
+static struct drm_info_list panfrost_sched_debugfs_list[] = {
+ { "sched_ctxs", show_each_file, 0, show_file_jm_ctxs },
+};
+
+static void panfrost_sched_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panfrost_sched_debugfs_list,
+ ARRAY_SIZE(panfrost_sched_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
static void panfrost_debugfs_init(struct drm_minor *minor)
{
panthor_gems_debugfs_init(minor);
+ panfrost_sched_debugfs_init(minor);
}
#endif
@@ -699,6 +834,8 @@ static void panfrost_debugfs_init(struct drm_minor *minor)
* - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
* - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
* - 1.4 - adds SET_LABEL_BO
+ * - 1.5 - adds JM_CTX_{CREATE,DESTROY} ioctls and extends SUBMIT to allow
+ *         context creation with configurable priorities/affinity
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -711,7 +848,7 @@ static const struct drm_driver panfrost_drm_driver = {
.name = "panfrost",
.desc = "panfrost DRM",
.major = 1,
- .minor = 4,
+ .minor = 5,
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
@@ -723,15 +860,12 @@ static const struct drm_driver panfrost_drm_driver = {
static int panfrost_probe(struct platform_device *pdev)
{
struct panfrost_device *pfdev;
- struct drm_device *ddev;
int err;
- pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
- if (!pfdev)
- return -ENOMEM;
-
- pfdev->pdev = pdev;
- pfdev->dev = &pdev->dev;
+ pfdev = devm_drm_dev_alloc(&pdev->dev, &panfrost_drm_driver,
+ struct panfrost_device, base);
+ if (IS_ERR(pfdev))
+ return PTR_ERR(pfdev);
platform_set_drvdata(pdev, pfdev);
@@ -741,14 +875,6 @@ static int panfrost_probe(struct platform_device *pdev)
pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
- /* Allocate and initialize the DRM device. */
- ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- ddev->dev_private = pfdev;
- pfdev->ddev = ddev;
-
mutex_init(&pfdev->shrinker_lock);
INIT_LIST_HEAD(&pfdev->shrinker_list);
@@ -759,51 +885,47 @@ static int panfrost_probe(struct platform_device *pdev)
goto err_out0;
}
- pm_runtime_set_active(pfdev->dev);
- pm_runtime_mark_last_busy(pfdev->dev);
- pm_runtime_enable(pfdev->dev);
- pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
- pm_runtime_use_autosuspend(pfdev->dev);
+ pm_runtime_set_active(pfdev->base.dev);
+ pm_runtime_mark_last_busy(pfdev->base.dev);
+ pm_runtime_enable(pfdev->base.dev);
+ pm_runtime_set_autosuspend_delay(pfdev->base.dev, 50); /* ~3 frames */
+ pm_runtime_use_autosuspend(pfdev->base.dev);
/*
* Register the DRM device with the core and the connectors with
* sysfs
*/
- err = drm_dev_register(ddev, 0);
+ err = drm_dev_register(&pfdev->base, 0);
if (err < 0)
goto err_out1;
- err = panfrost_gem_shrinker_init(ddev);
+ err = panfrost_gem_shrinker_init(&pfdev->base);
if (err)
goto err_out2;
return 0;
err_out2:
- drm_dev_unregister(ddev);
+ drm_dev_unregister(&pfdev->base);
err_out1:
- pm_runtime_disable(pfdev->dev);
+ pm_runtime_disable(pfdev->base.dev);
panfrost_device_fini(pfdev);
- pm_runtime_set_suspended(pfdev->dev);
+ pm_runtime_set_suspended(pfdev->base.dev);
err_out0:
- drm_dev_put(ddev);
return err;
}
static void panfrost_remove(struct platform_device *pdev)
{
struct panfrost_device *pfdev = platform_get_drvdata(pdev);
- struct drm_device *ddev = pfdev->ddev;
- drm_dev_unregister(ddev);
- panfrost_gem_shrinker_cleanup(ddev);
+ drm_dev_unregister(&pfdev->base);
+ panfrost_gem_shrinker_cleanup(&pfdev->base);
- pm_runtime_get_sync(pfdev->dev);
- pm_runtime_disable(pfdev->dev);
+ pm_runtime_get_sync(pfdev->base.dev);
+ pm_runtime_disable(pfdev->base.dev);
panfrost_device_fini(pfdev);
- pm_runtime_set_suspended(pfdev->dev);
-
- drm_dev_put(ddev);
+ pm_runtime_set_suspended(pfdev->base.dev);
}
static ssize_t profiling_show(struct device *dev,
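
For reference, a hedged userspace sketch of how a client might discover which JM context priorities it is allowed to use. struct drm_panfrost_get_param and DRM_IOCTL_PANFROST_GET_PARAM are existing panfrost UAPI; only the DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES value is new in this series, and the helper name below is purely illustrative.

/*
 * Hedged usage sketch, not part of the patch: query the allowed JM context
 * priorities and pick the best one the kernel will accept for this client.
 */
#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

static int panfrost_best_allowed_jm_prio(int fd)
{
	struct drm_panfrost_get_param get = {
		.param = DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES,
	};

	if (ioctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get))
		return -1;

	/* HIGH is only reported when the client has CAP_SYS_NICE or is DRM master. */
	if (get.value & (1ull << PANFROST_JM_CTX_PRIORITY_HIGH))
		return PANFROST_JM_CTX_PRIORITY_HIGH;
	if (get.value & (1ull << PANFROST_JM_CTX_PRIORITY_MEDIUM))
		return PANFROST_JM_CTX_PRIORITY_MEDIUM;
	return PANFROST_JM_CTX_PRIORITY_LOW;
}
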
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index 4042afe2fbf4..3ed6c902d0a1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -163,7 +163,7 @@ void panfrost_core_dump(struct panfrost_job *job)
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
__GFP_NORETRY);
if (!iter.start) {
- dev_warn(pfdev->dev, "failed to allocate devcoredump file\n");
+ dev_warn(pfdev->base.dev, "failed to allocate devcoredump file\n");
return;
}
@@ -204,14 +204,14 @@ void panfrost_core_dump(struct panfrost_job *job)
mapping = job->mappings[i];
if (!bo->base.sgt) {
- dev_err(pfdev->dev, "Panfrost Dump: BO has no sgt, cannot dump\n");
+ dev_err(pfdev->base.dev, "Panfrost Dump: BO has no sgt, cannot dump\n");
iter.hdr->bomap.valid = 0;
goto dump_header;
}
ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
- dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
+ dev_err(pfdev->base.dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
goto dump_header;
}
@@ -237,5 +237,5 @@ dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
}
panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data);
- dev_coredumpv(pfdev->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
+ dev_coredumpv(pfdev->base.dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 85d6289a6eda..0528de674a4f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -26,7 +26,7 @@ static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo)
{
- struct panfrost_device *pfdev = bo->base.base.dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(bo->base.base.dev);
if (list_empty(&bo->debugfs.node))
return;
@@ -48,7 +48,7 @@ static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {}
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
- struct panfrost_device *pfdev = obj->dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
/*
* Make sure the BO is no longer inserted in the shrinker list before
@@ -76,7 +76,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
for (i = 0; i < n_sgt; i++) {
if (bo->sgts[i].sgl) {
- dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
+ dma_unmap_sgtable(pfdev->base.dev, &bo->sgts[i],
DMA_BIDIRECTIONAL, 0);
sg_free_table(&bo->sgts[i]);
}
@@ -284,7 +284,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
*/
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 02b60ea1433a..2fe967a90bcb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -97,7 +97,7 @@ panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
*/
int panfrost_gem_shrinker_init(struct drm_device *dev)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
pfdev->shrinker = shrinker_alloc(0, "drm-panfrost");
if (!pfdev->shrinker)
@@ -120,7 +120,7 @@ int panfrost_gem_shrinker_init(struct drm_device *dev)
*/
void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
if (pfdev->shrinker)
shrinker_free(pfdev->shrinker);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 174e190ba40f..8d049a07d393 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -36,12 +36,12 @@ static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);
- dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
+ dev_warn(pfdev->base.dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
fault_status, panfrost_exception_name(fault_status & 0xFF),
address);
if (state & GPU_IRQ_MULTIPLE_FAULT)
- dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");
+ dev_warn(pfdev->base.dev, "There were multiple GPU faults - some have not been reported\n");
gpu_write(pfdev, GPU_INT_MASK, 0);
}
@@ -72,13 +72,13 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000);
if (ret) {
- dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n");
+ dev_err(pfdev->base.dev, "gpu soft reset timed out, attempting hard reset\n");
gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET);
ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val,
val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
if (ret) {
- dev_err(pfdev->dev, "gpu hard reset timed out\n");
+ dev_err(pfdev->base.dev, "gpu hard reset timed out\n");
return ret;
}
}
@@ -95,7 +95,7 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
* All in-flight jobs should have released their cycle
* counter references upon reset, but let us make sure
*/
- if (drm_WARN_ON(pfdev->ddev, atomic_read(&pfdev->cycle_counter.use_count) != 0))
+ if (drm_WARN_ON(&pfdev->base, atomic_read(&pfdev->cycle_counter.use_count) != 0))
atomic_set(&pfdev->cycle_counter.use_count, 0);
return 0;
@@ -240,9 +240,10 @@ static const struct panfrost_model gpu_models[] = {
/* MediaTek MT8188 Mali-G57 MC3 */
GPU_MODEL(g57, 0x9093,
GPU_REV(g57, 0, 0)),
+ {0},
};
-static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
+static int panfrost_gpu_init_features(struct panfrost_device *pfdev)
{
u32 gpu_id, num_js, major, minor, status, rev;
const char *name = "unknown";
@@ -327,16 +328,22 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
break;
}
+ if (!model->name) {
+ dev_err(pfdev->base.dev, "GPU model not found: mali-%s id rev %#x %#x\n",
+ name, gpu_id, rev);
+ return -ENODEV;
+ }
+
bitmap_from_u64(pfdev->features.hw_features, hw_feat);
bitmap_from_u64(pfdev->features.hw_issues, hw_issues);
- dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ dev_info(pfdev->base.dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
name, gpu_id, major, minor, status);
- dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
+ dev_info(pfdev->base.dev, "features: %64pb, issues: %64pb",
pfdev->features.hw_features,
pfdev->features.hw_issues);
- dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
+ dev_info(pfdev->base.dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
pfdev->features.l2_features,
pfdev->features.core_features,
pfdev->features.tiler_features,
@@ -345,8 +352,10 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.as_present,
pfdev->features.js_present);
- dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
+ dev_info(pfdev->base.dev, "shader_present=0x%0llx l2_present=0x%0llx",
pfdev->features.shader_present, pfdev->features.l2_present);
+
+ return 0;
}
void panfrost_cycle_counter_get(struct panfrost_device *pfdev)
@@ -411,7 +420,7 @@ static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
*/
core_mask = ~(pfdev->features.l2_present - 1) &
(pfdev->features.l2_present - 2);
- dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
+ dev_info_once(pfdev->base.dev, "using only 1st core group (%lu cores from %lu)\n",
hweight64(core_mask),
hweight64(pfdev->features.shader_present));
@@ -432,7 +441,7 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
val, val == (pfdev->features.l2_present & core_mask),
10, 20000);
if (ret)
- dev_err(pfdev->dev, "error powering up gpu L2");
+ dev_err(pfdev->base.dev, "error powering up gpu L2");
gpu_write(pfdev, SHADER_PWRON_LO,
pfdev->features.shader_present & core_mask);
@@ -440,13 +449,13 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
val, val == (pfdev->features.shader_present & core_mask),
10, 20000);
if (ret)
- dev_err(pfdev->dev, "error powering up gpu shader");
+ dev_err(pfdev->base.dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
val, val == pfdev->features.tiler_present, 10, 1000);
if (ret)
- dev_err(pfdev->dev, "error powering up gpu tiler");
+ dev_err(pfdev->base.dev, "error powering up gpu tiler");
}
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
@@ -458,19 +467,19 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
val, !val, 1, 2000);
if (ret)
- dev_err(pfdev->dev, "shader power transition timeout");
+ dev_err(pfdev->base.dev, "shader power transition timeout");
gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
val, !val, 1, 2000);
if (ret)
- dev_err(pfdev->dev, "tiler power transition timeout");
+ dev_err(pfdev->base.dev, "tiler power transition timeout");
gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
val, !val, 0, 2000);
if (ret)
- dev_err(pfdev->dev, "l2 power transition timeout");
+ dev_err(pfdev->base.dev, "l2 power transition timeout");
}
void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev)
@@ -489,23 +498,26 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
if (err)
return err;
- panfrost_gpu_init_features(pfdev);
+ err = panfrost_gpu_init_features(pfdev);
+ if (err)
+ return err;
- err = dma_set_mask_and_coherent(pfdev->dev,
- DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
+ err = dma_set_mask_and_coherent(pfdev->base.dev,
+ DMA_BIT_MASK(FIELD_GET(0xff00,
+ pfdev->features.mmu_features)));
if (err)
return err;
- dma_set_max_seg_size(pfdev->dev, UINT_MAX);
+ dma_set_max_seg_size(pfdev->base.dev, UINT_MAX);
- pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
+ pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "gpu");
if (pfdev->gpu_irq < 0)
return pfdev->gpu_irq;
- err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler,
+ err = devm_request_irq(pfdev->base.dev, pfdev->gpu_irq, panfrost_gpu_irq_handler,
IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
- dev_err(pfdev->dev, "failed to request gpu irq");
+ dev_err(pfdev->base.dev, "failed to request gpu irq");
return err;
}
@@ -525,9 +537,9 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) {
/* Flush reduction only makes sense when the GPU is kept powered on between jobs */
- if (pm_runtime_get_if_in_use(pfdev->dev)) {
+ if (pm_runtime_get_if_in_use(pfdev->base.dev)) {
flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
- pm_runtime_put(pfdev->dev);
+ pm_runtime_put(pfdev->base.dev);
return flush_id;
}
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 82acabb21b27..11894a6b9fcc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -22,11 +22,16 @@
#include "panfrost_mmu.h"
#include "panfrost_dump.h"
+#define MAX_JM_CTX_PER_FILE 64
#define JOB_TIMEOUT_MS 500
#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
+const char * const panfrost_engine_names[] = {
+ "fragment", "vertex-tiler", "compute-only"
+};
+
struct panfrost_queue_state {
struct drm_gpu_scheduler sched;
u64 fence_context;
@@ -94,7 +99,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in
if (!fence)
return ERR_PTR(-ENOMEM);
- fence->dev = pfdev->ddev;
+ fence->dev = &pfdev->base;
fence->queue = js_num;
fence->seqno = ++js->queue[js_num].emit_seqno;
dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
@@ -195,7 +200,7 @@ panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
return 1;
}
-static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
+static int panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
struct panfrost_device *pfdev = job->pfdev;
unsigned int subslot;
@@ -203,17 +208,22 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
u64 jc_head = job->jc;
int ret;
- panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
-
- ret = pm_runtime_get_sync(pfdev->dev);
+ ret = pm_runtime_get_sync(pfdev->base.dev);
if (ret < 0)
- return;
+ goto err_hwsubmit;
if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
- return;
+ ret = -EINVAL;
+ goto err_hwsubmit;
}
- cfg = panfrost_mmu_as_get(pfdev, job->mmu);
+ ret = panfrost_mmu_as_get(pfdev, job->mmu);
+ if (ret < 0)
+ goto err_hwsubmit;
+
+ cfg = ret;
+
+ panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
@@ -256,11 +266,17 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
}
job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
- dev_dbg(pfdev->dev,
+ dev_dbg(pfdev->base.dev,
"JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d",
job, js, subslot, jc_head, cfg & 0xf);
}
spin_unlock(&pfdev->js->job_lock);
+
+ return 0;
+
+err_hwsubmit:
+ pm_runtime_put_autosuspend(pfdev->base.dev);
+ return ret;
}
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
@@ -359,6 +375,7 @@ static void panfrost_job_cleanup(struct kref *ref)
kvfree(job->bos);
}
+ panfrost_jm_ctx_put(job->ctx);
kfree(job);
}
@@ -382,6 +399,10 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
struct panfrost_device *pfdev = job->pfdev;
int slot = panfrost_job_get_slot(job);
struct dma_fence *fence = NULL;
+ int ret;
+
+ if (job->ctx->destroyed)
+ return ERR_PTR(-ECANCELED);
if (unlikely(job->base.s_fence->finished.error))
return NULL;
@@ -400,27 +421,27 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
dma_fence_put(job->done_fence);
job->done_fence = dma_fence_get(fence);
- panfrost_job_hw_submit(job, slot);
+ ret = panfrost_job_hw_submit(job, slot);
+ if (ret) {
+ dma_fence_put(fence);
+ return ERR_PTR(ret);
+ }
return fence;
}
-void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
+void panfrost_jm_reset_interrupts(struct panfrost_device *pfdev)
{
- int j;
- u32 irq_mask = 0;
+ job_write(pfdev, JOB_INT_CLEAR, ALL_JS_INT_MASK);
+}
+void panfrost_jm_enable_interrupts(struct panfrost_device *pfdev)
+{
clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
-
- for (j = 0; j < NUM_JOB_SLOTS; j++) {
- irq_mask |= MK_JS_MASK(j);
- }
-
- job_write(pfdev, JOB_INT_CLEAR, irq_mask);
- job_write(pfdev, JOB_INT_MASK, irq_mask);
+ job_write(pfdev, JOB_INT_MASK, ALL_JS_INT_MASK);
}
-void panfrost_job_suspend_irq(struct panfrost_device *pfdev)
+void panfrost_jm_suspend_irq(struct panfrost_device *pfdev)
{
set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
@@ -437,12 +458,12 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
bool signal_fence = true;
if (!panfrost_exception_is_fault(js_status)) {
- dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
+ dev_dbg(pfdev->base.dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
js, exception_name,
job_read(pfdev, JS_HEAD_LO(js)),
job_read(pfdev, JS_TAIL_LO(js)));
} else {
- dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
+ dev_err(pfdev->base.dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
js, exception_name,
job_read(pfdev, JS_HEAD_LO(js)),
job_read(pfdev, JS_TAIL_LO(js)));
@@ -474,7 +495,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
if (signal_fence)
dma_fence_signal_locked(job->done_fence);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
if (panfrost_exception_needs_reset(pfdev, js_status)) {
atomic_set(&pfdev->reset.pending, 1);
@@ -482,8 +503,8 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
}
}
-static void panfrost_job_handle_done(struct panfrost_device *pfdev,
- struct panfrost_job *job)
+static void panfrost_jm_handle_done(struct panfrost_device *pfdev,
+ struct panfrost_job *job)
{
/* Set ->jc to 0 to avoid re-submitting an already finished job (can
* happen when we receive the DONE interrupt while doing a GPU reset).
@@ -493,10 +514,10 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev,
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
dma_fence_signal_locked(job->done_fence);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
}
-static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
+static void panfrost_jm_handle_irq(struct panfrost_device *pfdev, u32 status)
{
struct panfrost_job *done[NUM_JOB_SLOTS][2] = {};
struct panfrost_job *failed[NUM_JOB_SLOTS] = {};
@@ -571,7 +592,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
}
for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++)
- panfrost_job_handle_done(pfdev, done[j][i]);
+ panfrost_jm_handle_done(pfdev, done[j][i]);
}
/* And finally we requeue jobs that were waiting in the second slot
@@ -589,7 +610,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j);
dma_fence_set_error(canceled->done_fence, -ECANCELED);
- panfrost_job_handle_done(pfdev, canceled);
+ panfrost_jm_handle_done(pfdev, canceled);
} else if (!atomic_read(&pfdev->reset.pending)) {
/* Requeue the job we removed if no reset is pending */
job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START);
@@ -597,15 +618,15 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
}
}
-static void panfrost_job_handle_irqs(struct panfrost_device *pfdev)
+static void panfrost_jm_handle_irqs(struct panfrost_device *pfdev)
{
u32 status = job_read(pfdev, JOB_INT_RAWSTAT);
while (status) {
- pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_mark_last_busy(pfdev->base.dev);
spin_lock(&pfdev->js->job_lock);
- panfrost_job_handle_irq(pfdev, status);
+ panfrost_jm_handle_irq(pfdev, status);
spin_unlock(&pfdev->js->job_lock);
status = job_read(pfdev, JOB_INT_RAWSTAT);
}
@@ -683,10 +704,10 @@ panfrost_reset(struct panfrost_device *pfdev,
10, 10000);
if (ret)
- dev_err(pfdev->dev, "Soft-stop failed\n");
+ dev_err(pfdev->base.dev, "Soft-stop failed\n");
/* Handle the remaining interrupts before we reset. */
- panfrost_job_handle_irqs(pfdev);
+ panfrost_jm_handle_irqs(pfdev);
/* Remaining interrupts have been handled, but we might still have
* stuck jobs. Let's make sure the PM counters stay balanced by
@@ -701,7 +722,7 @@ panfrost_reset(struct panfrost_device *pfdev,
if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT ||
pfdev->jobs[i][j]->is_profiled)
panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev);
- pm_runtime_put_noidle(pfdev->dev);
+ pm_runtime_put_noidle(pfdev->base.dev);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
}
}
@@ -709,12 +730,7 @@ panfrost_reset(struct panfrost_device *pfdev,
spin_unlock(&pfdev->js->job_lock);
/* Proceed with reset now. */
- panfrost_device_reset(pfdev);
-
- /* panfrost_device_reset() unmasks job interrupts, but we want to
- * keep them masked a bit longer.
- */
- job_write(pfdev, JOB_INT_MASK, 0);
+ panfrost_device_reset(pfdev, false);
/* GPU has been reset, we can clear the reset pending bit. */
atomic_set(&pfdev->reset.pending, 0);
@@ -736,9 +752,7 @@ panfrost_reset(struct panfrost_device *pfdev,
drm_sched_start(&pfdev->js->queue[i].sched, 0);
/* Re-enable job interrupts now that everything has been restarted. */
- job_write(pfdev, JOB_INT_MASK,
- GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
- GENMASK(NUM_JOB_SLOTS - 1, 0));
+ panfrost_jm_enable_interrupts(pfdev);
dma_fence_end_signalling(cookie);
}
@@ -769,11 +783,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
synchronize_irq(pfdev->js->irq);
if (dma_fence_is_signaled(job->done_fence)) {
- dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
+ dev_warn(pfdev->base.dev, "unexpectedly high interrupt latency\n");
return DRM_GPU_SCHED_STAT_NO_HANG;
}
- dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
+ dev_err(pfdev->base.dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
job_read(pfdev, JS_CONFIG(js)),
job_read(pfdev, JS_STATUS(js)),
@@ -803,22 +817,20 @@ static const struct drm_sched_backend_ops panfrost_sched_ops = {
.free_job = panfrost_job_free
};
-static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
+static irqreturn_t panfrost_jm_irq_handler_thread(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- panfrost_job_handle_irqs(pfdev);
+ panfrost_jm_handle_irqs(pfdev);
/* Enable interrupts only if we're not about to get suspended */
if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
- job_write(pfdev, JOB_INT_MASK,
- GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
- GENMASK(NUM_JOB_SLOTS - 1, 0));
+ job_write(pfdev, JOB_INT_MASK, ALL_JS_INT_MASK);
return IRQ_HANDLED;
}
-static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
+static irqreturn_t panfrost_jm_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
u32 status;
@@ -834,19 +846,20 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-int panfrost_job_init(struct panfrost_device *pfdev)
+int panfrost_jm_init(struct panfrost_device *pfdev)
{
struct drm_sched_init_args args = {
.ops = &panfrost_sched_ops,
.num_rqs = DRM_SCHED_PRIORITY_COUNT,
.credit_limit = 2,
.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
- .name = "pan_js",
- .dev = pfdev->dev,
+ .dev = pfdev->base.dev,
};
struct panfrost_job_slot *js;
int ret, j;
+ BUILD_BUG_ON(ARRAY_SIZE(panfrost_engine_names) != NUM_JOB_SLOTS);
+
/* All GPUs have two entries per queue, but without jobchain
* disambiguation stopping the right job in the close path is tricky,
* so let's just advertise one entry in that case.
@@ -854,24 +867,25 @@ int panfrost_job_init(struct panfrost_device *pfdev)
if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
args.credit_limit = 1;
- pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
+ js = devm_kzalloc(pfdev->base.dev, sizeof(*js), GFP_KERNEL);
if (!js)
return -ENOMEM;
+ pfdev->js = js;
INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
spin_lock_init(&js->job_lock);
- js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
+ js->irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "job");
if (js->irq < 0)
return js->irq;
- ret = devm_request_threaded_irq(pfdev->dev, js->irq,
- panfrost_job_irq_handler,
- panfrost_job_irq_handler_thread,
+ ret = devm_request_threaded_irq(pfdev->base.dev, js->irq,
+ panfrost_jm_irq_handler,
+ panfrost_jm_irq_handler_thread,
IRQF_SHARED, KBUILD_MODNAME "-job",
pfdev);
if (ret) {
- dev_err(pfdev->dev, "failed to request job irq");
+ dev_err(pfdev->base.dev, "failed to request job irq");
return ret;
}
@@ -882,15 +896,17 @@ int panfrost_job_init(struct panfrost_device *pfdev)
for (j = 0; j < NUM_JOB_SLOTS; j++) {
js->queue[j].fence_context = dma_fence_context_alloc(1);
+ args.name = panfrost_engine_names[j];
ret = drm_sched_init(&js->queue[j].sched, &args);
if (ret) {
- dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
+ dev_err(pfdev->base.dev, "Failed to create scheduler: %d.", ret);
goto err_sched;
}
}
- panfrost_job_enable_interrupts(pfdev);
+ panfrost_jm_reset_interrupts(pfdev);
+ panfrost_jm_enable_interrupts(pfdev);
return 0;
@@ -902,7 +918,7 @@ err_sched:
return ret;
}
-void panfrost_job_fini(struct panfrost_device *pfdev)
+void panfrost_jm_fini(struct panfrost_device *pfdev)
{
struct panfrost_job_slot *js = pfdev->js;
int j;
@@ -917,39 +933,176 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
destroy_workqueue(pfdev->reset.wq);
}
-int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
+int panfrost_jm_open(struct drm_file *file)
+{
+ struct panfrost_file_priv *panfrost_priv = file->driver_priv;
+ int ret;
+
+ struct drm_panfrost_jm_ctx_create default_jm_ctx = {
+ .priority = PANFROST_JM_CTX_PRIORITY_MEDIUM,
+ };
+
+ xa_init_flags(&panfrost_priv->jm_ctxs, XA_FLAGS_ALLOC);
+
+ ret = panfrost_jm_ctx_create(file, &default_jm_ctx);
+ if (ret)
+ return ret;
+
+ /* We expect the default context to be assigned handle 0. */
+ if (WARN_ON(default_jm_ctx.handle))
+ return -EINVAL;
+
+ return 0;
+}
+
+void panfrost_jm_close(struct drm_file *file)
+{
+ struct panfrost_file_priv *panfrost_priv = file->driver_priv;
+ struct panfrost_jm_ctx *jm_ctx;
+ unsigned long i;
+
+ xa_for_each(&panfrost_priv->jm_ctxs, i, jm_ctx)
+ panfrost_jm_ctx_destroy(file, i);
+
+ xa_destroy(&panfrost_priv->jm_ctxs);
+}
+
+int panfrost_jm_is_idle(struct panfrost_device *pfdev)
{
- struct panfrost_device *pfdev = panfrost_priv->pfdev;
struct panfrost_job_slot *js = pfdev->js;
- struct drm_gpu_scheduler *sched;
- int ret, i;
+ int i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
- sched = &js->queue[i].sched;
- ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
- DRM_SCHED_PRIORITY_NORMAL, &sched,
- 1, NULL);
- if (WARN_ON(ret))
- return ret;
+ /* If there are any jobs in the HW queue, we're not idle */
+ if (atomic_read(&js->queue[i].sched.credit_count))
+ return false;
+ }
+
+ return true;
+}
+
+static void panfrost_jm_ctx_release(struct kref *kref)
+{
+ struct panfrost_jm_ctx *jm_ctx = container_of(kref, struct panfrost_jm_ctx, refcnt);
+
+ WARN_ON(!jm_ctx->destroyed);
+
+ for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++)
+ drm_sched_entity_destroy(&jm_ctx->slot_entity[i]);
+
+ kfree(jm_ctx);
+}
+
+void
+panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx)
+{
+ if (jm_ctx)
+ kref_put(&jm_ctx->refcnt, panfrost_jm_ctx_release);
+}
+
+struct panfrost_jm_ctx *
+panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx)
+{
+ if (jm_ctx)
+ kref_get(&jm_ctx->refcnt);
+
+ return jm_ctx;
+}
+
+struct panfrost_jm_ctx *
+panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle)
+{
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_jm_ctx *jm_ctx;
+
+ xa_lock(&priv->jm_ctxs);
+ jm_ctx = panfrost_jm_ctx_get(xa_load(&priv->jm_ctxs, handle));
+ xa_unlock(&priv->jm_ctxs);
+
+ return jm_ctx;
+}
+
+static int jm_ctx_prio_to_drm_sched_prio(struct drm_file *file,
+ enum drm_panfrost_jm_ctx_priority in,
+ enum drm_sched_priority *out)
+{
+ switch (in) {
+ case PANFROST_JM_CTX_PRIORITY_LOW:
+ *out = DRM_SCHED_PRIORITY_LOW;
+ return 0;
+ case PANFROST_JM_CTX_PRIORITY_MEDIUM:
+ *out = DRM_SCHED_PRIORITY_NORMAL;
+ return 0;
+ case PANFROST_JM_CTX_PRIORITY_HIGH:
+ if (!panfrost_high_prio_allowed(file))
+ return -EACCES;
+
+ *out = DRM_SCHED_PRIORITY_HIGH;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int panfrost_jm_ctx_create(struct drm_file *file,
+ struct drm_panfrost_jm_ctx_create *args)
+{
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_device *pfdev = priv->pfdev;
+ enum drm_sched_priority sched_prio;
+ struct panfrost_jm_ctx *jm_ctx;
+ int ret;
+
+ jm_ctx = kzalloc(sizeof(*jm_ctx), GFP_KERNEL);
+ if (!jm_ctx)
+ return -ENOMEM;
+
+ kref_init(&jm_ctx->refcnt);
+
+ ret = jm_ctx_prio_to_drm_sched_prio(file, args->priority, &sched_prio);
+ if (ret)
+ goto err_put_jm_ctx;
+
+ for (u32 i = 0; i < NUM_JOB_SLOTS; i++) {
+ struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
+
+ ret = drm_sched_entity_init(&jm_ctx->slot_entity[i], sched_prio,
+ &sched, 1, NULL);
+ if (ret)
+ goto err_put_jm_ctx;
}
+
+ ret = xa_alloc(&priv->jm_ctxs, &args->handle, jm_ctx,
+ XA_LIMIT(0, MAX_JM_CTX_PER_FILE), GFP_KERNEL);
+ if (ret)
+ goto err_put_jm_ctx;
+
return 0;
+
+err_put_jm_ctx:
+ jm_ctx->destroyed = true;
+ panfrost_jm_ctx_put(jm_ctx);
+ return ret;
}
-void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
+int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle)
{
- struct panfrost_device *pfdev = panfrost_priv->pfdev;
- int i;
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_device *pfdev = priv->pfdev;
+ struct panfrost_jm_ctx *jm_ctx;
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
+ jm_ctx = xa_erase(&priv->jm_ctxs, handle);
+ if (!jm_ctx)
+ return -EINVAL;
+
+ jm_ctx->destroyed = true;
/* Kill in-flight jobs */
spin_lock(&pfdev->js->job_lock);
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
- int j;
+ for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++) {
+ struct drm_sched_entity *entity = &jm_ctx->slot_entity[i];
- for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
+ for (int j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
struct panfrost_job *job = pfdev->jobs[i][j];
u32 cmd;
@@ -980,18 +1133,7 @@ void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
}
}
spin_unlock(&pfdev->js->job_lock);
-}
-
-int panfrost_job_is_idle(struct panfrost_device *pfdev)
-{
- struct panfrost_job_slot *js = pfdev->js;
- int i;
-
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- /* If there are any jobs in the HW queue, we're not idle */
- if (atomic_read(&js->queue[i].sched.credit_count))
- return false;
- }
- return true;
+ panfrost_jm_ctx_put(jm_ctx);
+ return 0;
}
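A minimal sketch (not part of the patch) of how a submit path is expected to use the refcounted, handle-addressed JM contexts introduced above; the caller name and the ownership comments are hypothetical, only the _from_handle()/_get()/_put() helpers and the job->ctx field come from the patch:

static int panfrost_submit_with_ctx(struct drm_file *file, u32 ctx_handle,
				    struct panfrost_job *job)
{
	struct panfrost_jm_ctx *jm_ctx;
	int ret;

	/* Lookup takes a reference under xa_lock; NULL means a stale handle. */
	jm_ctx = panfrost_jm_ctx_from_handle(file, ctx_handle);
	if (!jm_ctx)
		return -EINVAL;

	/* Assumed model: the job keeps its own reference while in flight. */
	job->ctx = panfrost_jm_ctx_get(jm_ctx);
	ret = panfrost_job_push(job);

	/* Drop the lookup reference; the job's reference outlives it. */
	panfrost_jm_ctx_put(jm_ctx);
	return ret;
}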
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index ec581b97852b..c3f57e41a571 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -18,6 +18,7 @@ struct panfrost_job {
struct panfrost_device *pfdev;
struct panfrost_mmu *mmu;
+ struct panfrost_jm_ctx *ctx;
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
@@ -39,15 +40,38 @@ struct panfrost_job {
u64 start_cycles;
};
-int panfrost_job_init(struct panfrost_device *pfdev);
-void panfrost_job_fini(struct panfrost_device *pfdev);
-int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
-void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+struct panfrost_js_ctx {
+ struct drm_sched_entity sched_entity;
+ bool enabled;
+};
+
+#define NUM_JOB_SLOTS 3
+
+struct panfrost_jm_ctx {
+ struct kref refcnt;
+ bool destroyed;
+ struct drm_sched_entity slot_entity[NUM_JOB_SLOTS];
+};
+
+extern const char * const panfrost_engine_names[];
+
+int panfrost_jm_ctx_create(struct drm_file *file,
+ struct drm_panfrost_jm_ctx_create *args);
+int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle);
+void panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx);
+struct panfrost_jm_ctx *panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx);
+struct panfrost_jm_ctx *panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle);
+
+int panfrost_jm_init(struct panfrost_device *pfdev);
+void panfrost_jm_fini(struct panfrost_device *pfdev);
+int panfrost_jm_open(struct drm_file *file);
+void panfrost_jm_close(struct drm_file *file);
+void panfrost_jm_reset_interrupts(struct panfrost_device *pfdev);
+void panfrost_jm_enable_interrupts(struct panfrost_device *pfdev);
+void panfrost_jm_suspend_irq(struct panfrost_device *pfdev);
+int panfrost_jm_is_idle(struct panfrost_device *pfdev);
int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
-void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
-void panfrost_job_suspend_irq(struct panfrost_device *pfdev);
-int panfrost_job_is_idle(struct panfrost_device *pfdev);
#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index f6b91c052cfb..02ccc05e23bb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -81,7 +81,7 @@ static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
if (ret) {
/* The GPU hung, let's trigger a reset */
panfrost_device_schedule_reset(pfdev);
- dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
+ dev_err(pfdev->base.dev, "AS_ACTIVE bit stuck\n");
}
return ret;
@@ -222,7 +222,7 @@ static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
struct panfrost_device *pfdev = mmu->pfdev;
- if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+ if (drm_WARN_ON(&pfdev->base, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
~AS_TRANSTAB_AARCH64_4K_ADDR_MASK))
return -EINVAL;
@@ -253,12 +253,12 @@ static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
return mmu_cfg_init_mali_lpae(mmu);
default:
/* This should never happen */
- drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
+ drm_WARN(&pfdev->base, 1, "Invalid pgtable format");
return -EINVAL;
}
}
-u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+int panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as;
@@ -300,7 +300,10 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
if (!atomic_read(&lru_mmu->as_count))
break;
}
- WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);
+ if (WARN_ON(&lru_mmu->list == &pfdev->as_lru_list)) {
+ as = -EBUSY;
+ goto out;
+ }
list_del_init(&lru_mmu->list);
as = lru_mmu->as;
@@ -315,7 +318,9 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
atomic_set(&mmu->as_count, 1);
list_add(&mmu->list, &pfdev->as_lru_list);
- dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
+ dev_dbg(pfdev->base.dev,
+ "Assigned AS%d to mmu %p, alloc_mask=%lx",
+ as, mmu, pfdev->as_alloc_mask);
panfrost_mmu_enable(pfdev, mmu);
@@ -381,13 +386,30 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
if (mmu->as < 0)
return;
- pm_runtime_get_noresume(pfdev->dev);
+ pm_runtime_get_noresume(pfdev->base.dev);
/* Flush the PTs only if we're already awake */
- if (pm_runtime_active(pfdev->dev))
+ if (pm_runtime_active(pfdev->base.dev))
mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
+}
+
+static void mmu_unmap_range(struct panfrost_mmu *mmu, u64 iova, size_t len)
+{
+ struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+ size_t pgsize, unmapped_len = 0;
+ size_t unmapped_page, pgcount;
+
+ while (unmapped_len < len) {
+ pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
+
+ unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
+ WARN_ON(unmapped_page != pgsize * pgcount);
+
+ iova += pgsize * pgcount;
+ unmapped_len += pgsize * pgcount;
+ }
}
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
@@ -396,22 +418,30 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
unsigned int count;
struct scatterlist *sgl;
struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+ size_t total_mapped = 0;
u64 start_iova = iova;
+ int ret;
for_each_sgtable_dma_sg(sgt, sgl, count) {
unsigned long paddr = sg_dma_address(sgl);
size_t len = sg_dma_len(sgl);
- dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
+ dev_dbg(pfdev->base.dev,
+ "map: as=%d, iova=%llx, paddr=%lx, len=%zx",
+ mmu->as, iova, paddr, len);
while (len) {
size_t pgcount, mapped = 0;
size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
- ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
+ ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
GFP_KERNEL, &mapped);
+ if (ret)
+ goto err_unmap_pages;
+
/* Don't get stuck if things have gone wrong */
mapped = max(mapped, pgsize);
+ total_mapped += mapped;
iova += mapped;
paddr += mapped;
len -= mapped;
@@ -421,6 +451,10 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
return 0;
+
+err_unmap_pages:
+ mmu_unmap_range(mmu, start_iova, total_mapped);
+ return ret;
}
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
@@ -431,6 +465,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
+ int ret;
if (WARN_ON(mapping->active))
return 0;
@@ -442,11 +477,18 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
- mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
- prot, sgt);
+ ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+ prot, sgt);
+ if (ret)
+ goto err_put_pages;
+
mapping->active = true;
return 0;
+
+err_put_pages:
+ drm_gem_shmem_put_pages_locked(shmem);
+ return ret;
}
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
@@ -462,7 +504,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
if (WARN_ON(!mapping->active))
return;
- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+ dev_dbg(pfdev->base.dev, "unmap: as=%d, iova=%llx, len=%zx",
mapping->mmu->as, iova, len);
while (unmapped_len < len) {
@@ -559,7 +601,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
bo = bomapping->obj;
if (!bo->is_heap) {
- dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
+ dev_WARN(pfdev->base.dev, "matching BO is not heap type (GPU VA = %llx)",
bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL;
goto err_bo;
@@ -595,10 +637,12 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
refcount_set(&bo->base.pages_use_count, 1);
} else {
pages = bo->base.pages;
- if (pages[page_offset]) {
- /* Pages are already mapped, bail out. */
- goto out;
- }
+ }
+
+ sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
+ if (sgt->sgl) {
+ /* Pages are already mapped, bail out. */
+ goto out;
}
mapping = bo->base.base.filp->f_mapping;
@@ -620,23 +664,24 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
}
}
- sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
goto err_unlock;
- ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ ret = dma_map_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
goto err_map;
- mmu_map_sg(pfdev, bomapping->mmu, addr,
- IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
+ ret = mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
+ if (ret)
+ goto err_mmu_map_sg;
bomapping->active = true;
bo->heap_rss_size += SZ_2M;
- dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+ dev_dbg(pfdev->base.dev, "mapped page fault @ AS%d %llx", as, addr);
out:
dma_resv_unlock(obj->resv);
@@ -645,6 +690,8 @@ out:
return 0;
+err_mmu_map_sg:
+ dma_unmap_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0);
err_map:
sg_free_table(sgt);
err_unlock:
@@ -662,13 +709,12 @@ static void panfrost_mmu_release_ctx(struct kref *kref)
spin_lock(&pfdev->as_lock);
if (mmu->as >= 0) {
- pm_runtime_get_noresume(pfdev->dev);
- if (pm_runtime_active(pfdev->dev))
+ pm_runtime_get_noresume(pfdev->base.dev);
+ if (pm_runtime_active(pfdev->base.dev))
panfrost_mmu_disable(pfdev, mmu->as);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
clear_bit(mmu->as, &pfdev->as_alloc_mask);
- clear_bit(mmu->as, &pfdev->as_in_use_mask);
list_del(&mmu->list);
}
spin_unlock(&pfdev->as_lock);
@@ -726,7 +772,7 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
- dev_err_once(pfdev->dev,
+ dev_err_once(pfdev->base.dev,
"AARCH64_4K page table not supported\n");
return ERR_PTR(-EINVAL);
}
@@ -755,7 +801,7 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
.oas = pa_bits,
.coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
- .iommu_dev = pfdev->dev,
+ .iommu_dev = pfdev->base.dev,
};
mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
@@ -848,7 +894,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
if (ret) {
/* terminal fault, print info about the fault */
- dev_err(pfdev->dev,
+ dev_err(pfdev->base.dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
"Reason: %s\n"
"raw fault status: 0x%X\n"
@@ -896,18 +942,18 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
{
int err;
- pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
+ pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "mmu");
if (pfdev->mmu_irq < 0)
return pfdev->mmu_irq;
- err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
+ err = devm_request_threaded_irq(pfdev->base.dev, pfdev->mmu_irq,
panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
IRQF_SHARED, KBUILD_MODNAME "-mmu",
pfdev);
if (err) {
- dev_err(pfdev->dev, "failed to request mmu irq");
+ dev_err(pfdev->base.dev, "failed to request mmu irq");
return err;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index 022a9a74a114..27c3c65ed074 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -4,6 +4,7 @@
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__
+struct panfrost_device;
struct panfrost_gem_mapping;
struct panfrost_file_priv;
struct panfrost_mmu;
@@ -16,7 +17,7 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev);
void panfrost_mmu_reset(struct panfrost_device *pfdev);
void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev);
-u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+int panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu);
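With panfrost_mmu_as_get() now returning int, callers must check for a negative errno before using the address space number. A minimal sketch of the expected caller pattern (the function name is illustrative; the perfcnt change below does the same):

static int example_use_as(struct panfrost_device *pfdev,
			  struct panfrost_mmu *mmu)
{
	int as = panfrost_mmu_as_get(pfdev, mmu);

	if (as < 0)
		return as;	/* e.g. -EBUSY when no AS could be reclaimed */

	/* ... program the hardware with the assigned address space 'as' ... */

	panfrost_mmu_as_put(pfdev, mmu);
	return 0;
}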
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 0dd62e8b2fa7..7020c0192e18 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -84,11 +84,11 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
else if (perfcnt->user)
return -EBUSY;
- ret = pm_runtime_get_sync(pfdev->dev);
+ ret = pm_runtime_get_sync(pfdev->base.dev);
if (ret < 0)
goto err_put_pm;
- bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize);
+ bo = drm_gem_shmem_create(&pfdev->base, perfcnt->bosize);
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
goto err_put_pm;
@@ -130,9 +130,11 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_vunmap;
}
- perfcnt->user = user;
+ ret = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
+ if (ret < 0)
+ goto err_vunmap;
- as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
+ as = ret;
cfg = GPU_PERFCNT_CFG_AS(as) |
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
@@ -164,6 +166,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
/* The BO ref is retained by the mapping. */
drm_gem_object_put(&bo->base);
+ perfcnt->user = user;
+
return 0;
err_vunmap:
@@ -175,7 +179,7 @@ err_close_bo:
err_put_bo:
drm_gem_object_put(&bo->base);
err_put_pm:
- pm_runtime_put(pfdev->dev);
+ pm_runtime_put(pfdev->base.dev);
return ret;
}
@@ -203,7 +207,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
panfrost_gem_mapping_put(perfcnt->mapping);
perfcnt->mapping = NULL;
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
return 0;
}
@@ -211,7 +215,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_panfrost_perfcnt_enable *req = data;
int ret;
@@ -238,7 +242,7 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_panfrost_perfcnt_dump *req = data;
void __user *user_ptr = (void __user *)(uintptr_t)req->buf_ptr;
@@ -273,12 +277,12 @@ void panfrost_perfcnt_close(struct drm_file *file_priv)
struct panfrost_device *pfdev = pfile->pfdev;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
- pm_runtime_get_sync(pfdev->dev);
+ pm_runtime_get_sync(pfdev->base.dev);
mutex_lock(&perfcnt->lock);
if (perfcnt->user == pfile)
panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
}
int panfrost_perfcnt_init(struct panfrost_device *pfdev)
@@ -316,7 +320,7 @@ int panfrost_perfcnt_init(struct panfrost_device *pfdev)
COUNTERS_PER_BLOCK * BYTES_PER_COUNTER;
}
- perfcnt = devm_kzalloc(pfdev->dev, sizeof(*perfcnt), GFP_KERNEL);
+ perfcnt = devm_kzalloc(pfdev->base.dev, sizeof(*perfcnt), GFP_KERNEL);
if (!perfcnt)
return -ENOMEM;
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
index 3686515d368d..2df1d76d84a0 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.c
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
@@ -146,10 +146,9 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
ptdev->devfreq = pdevfreq;
ret = devm_pm_opp_set_regulators(dev, reg_names);
- if (ret) {
+ if (ret && ret != -ENODEV) {
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
-
return ret;
}
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 81df49880bd8..c7033d82cef5 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -172,6 +172,8 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
+ ptdev->soc_data = of_device_get_match_data(ptdev->base.dev);
+
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
if (ret)
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 4fc7cf2aeed5..9f0649ecfc4f 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -32,6 +32,17 @@ struct panthor_vm;
struct panthor_vm_pool;
/**
+ * struct panthor_soc_data - Panthor SoC Data
+ */
+struct panthor_soc_data {
+ /** @asn_hash_enable: True if GPU_L2_CONFIG_ASN_HASH_ENABLE must be set. */
+ bool asn_hash_enable;
+
+ /** @asn_hash: ASN_HASH values when asn_hash_enable is true. */
+ u32 asn_hash[3];
+};
+
+/**
* enum panthor_device_pm_state - PM state
*/
enum panthor_device_pm_state {
@@ -93,6 +104,9 @@ struct panthor_device {
/** @base: Base drm_device. */
struct drm_device base;
+ /** @soc_data: Optional SoC data. */
+ const struct panthor_soc_data *soc_data;
+
/** @phys_addr: Physical address of the iomem region. */
phys_addr_t phys_addr;
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 4c202fc5ce05..fb4b293f17f0 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1105,7 +1105,7 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
if (ret)
goto out;
- ret = panthor_group_create(pfile, args, queue_args);
+ ret = panthor_group_create(pfile, args, queue_args, file->client_id);
if (ret < 0)
goto out;
args->group_handle = ret;
@@ -1682,7 +1682,13 @@ static struct attribute *panthor_attrs[] = {
ATTRIBUTE_GROUPS(panthor);
+static const struct panthor_soc_data soc_data_mediatek_mt8196 = {
+ .asn_hash_enable = true,
+ .asn_hash = { 0xb, 0xe, 0x0, },
+};
+
static const struct of_device_id dt_match[] = {
+ { .compatible = "mediatek,mt8196-mali", .data = &soc_data_mediatek_mt8196, },
{ .compatible = "rockchip,rk3588-mali" },
{ .compatible = "arm,mali-valhall-csf" },
{}
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index db69449a5be0..9d98720ce03f 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -52,6 +52,28 @@ static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
}
+static void panthor_gpu_l2_config_set(struct panthor_device *ptdev)
+{
+ const struct panthor_soc_data *data = ptdev->soc_data;
+ u32 l2_config;
+ u32 i;
+
+ if (!data || !data->asn_hash_enable)
+ return;
+
+ if (GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) < 11) {
+ drm_err(&ptdev->base, "Custom ASN hash not supported by the device");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data->asn_hash); i++)
+ gpu_write(ptdev, GPU_ASN_HASH(i), data->asn_hash[i]);
+
+ l2_config = gpu_read(ptdev, GPU_L2_CONFIG);
+ l2_config |= GPU_L2_CONFIG_ASN_HASH_ENABLE;
+ gpu_write(ptdev, GPU_L2_CONFIG, l2_config);
+}
+
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
gpu_write(ptdev, GPU_INT_CLEAR, status);
@@ -241,8 +263,9 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
hweight64(ptdev->gpu_info.shader_present));
}
- /* Set the desired coherency mode before the power up of L2 */
+ /* Set the desired coherency mode and L2 config before the power up of L2 */
panthor_gpu_coherency_set(ptdev);
+ panthor_gpu_l2_config_set(ptdev);
return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index 8bee76d01bf8..8fa69f33e911 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -64,6 +64,8 @@
#define GPU_FAULT_STATUS 0x3C
#define GPU_FAULT_ADDR 0x40
+#define GPU_L2_CONFIG 0x48
+#define GPU_L2_CONFIG_ASN_HASH_ENABLE BIT(24)
#define GPU_PWR_KEY 0x50
#define GPU_PWR_KEY_UNLOCK 0x2968A819
@@ -110,6 +112,8 @@
#define GPU_REVID 0x280
+#define GPU_ASN_HASH(n) (0x2C0 + ((n) * 4))
+
#define GPU_COHERENCY_FEATURES 0x300
#define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name)
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 3d1f57e3990f..f5e01cb16cfc 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -360,6 +360,9 @@ struct panthor_queue {
/** @entity: DRM scheduling entity used for this queue. */
struct drm_sched_entity entity;
+ /** @name: DRM scheduler name for this queue. */
+ char *name;
+
/**
* @remaining_time: Time remaining before the job timeout expires.
*
@@ -900,6 +903,8 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
if (queue->scheduler.ops)
drm_sched_fini(&queue->scheduler);
+ kfree(queue->name);
+
panthor_queue_put_syncwait_obj(queue);
panthor_kernel_bo_destroy(queue->ringbuf);
@@ -1411,7 +1416,7 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
fault = cs_iface->output->fault;
info = cs_iface->output->fault_info;
- if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
+ if (queue) {
u64 cs_extract = queue->iface.output->extract;
struct panthor_job *job;
@@ -3307,9 +3312,10 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
static struct panthor_queue *
group_create_queue(struct panthor_group *group,
- const struct drm_panthor_queue_create *args)
+ const struct drm_panthor_queue_create *args,
+ u64 drm_client_id, u32 gid, u32 qid)
{
- const struct drm_sched_init_args sched_args = {
+ struct drm_sched_init_args sched_args = {
.ops = &panthor_queue_sched_ops,
.submit_wq = group->ptdev->scheduler->wq,
.num_rqs = 1,
@@ -3322,7 +3328,6 @@ group_create_queue(struct panthor_group *group,
.credit_limit = args->ringbuf_size / sizeof(u64),
.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
.timeout_wq = group->ptdev->reset.wq,
- .name = "panthor-queue",
.dev = group->ptdev->base.dev,
};
struct drm_gpu_scheduler *drm_sched;
@@ -3397,6 +3402,15 @@ group_create_queue(struct panthor_group *group,
if (ret)
goto err_free_queue;
+ /* assign a unique name */
+ queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid);
+ if (!queue->name) {
+ ret = -ENOMEM;
+ goto err_free_queue;
+ }
+
+ sched_args.name = queue->name;
+
ret = drm_sched_init(&queue->scheduler, &sched_args);
if (ret)
goto err_free_queue;
@@ -3446,7 +3460,8 @@ static void add_group_kbo_sizes(struct panthor_device *ptdev,
int panthor_group_create(struct panthor_file *pfile,
const struct drm_panthor_group_create *group_args,
- const struct drm_panthor_queue_create *queue_args)
+ const struct drm_panthor_queue_create *queue_args,
+ u64 drm_client_id)
{
struct panthor_device *ptdev = pfile->ptdev;
struct panthor_group_pool *gpool = pfile->groups;
@@ -3539,12 +3554,16 @@ int panthor_group_create(struct panthor_file *pfile,
memset(group->syncobjs->kmap, 0,
group_args->queues.count * sizeof(struct panthor_syncobj_64b));
+ ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
+ if (ret)
+ goto err_put_group;
+
for (i = 0; i < group_args->queues.count; i++) {
- group->queues[i] = group_create_queue(group, &queue_args[i]);
+ group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i);
if (IS_ERR(group->queues[i])) {
ret = PTR_ERR(group->queues[i]);
group->queues[i] = NULL;
- goto err_put_group;
+ goto err_erase_gid;
}
group->queue_count++;
@@ -3552,10 +3571,6 @@ int panthor_group_create(struct panthor_file *pfile,
group->idle_queues = GENMASK(group->queue_count - 1, 0);
- ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
- if (ret)
- goto err_put_group;
-
mutex_lock(&sched->reset.lock);
if (atomic_read(&sched->reset.in_progress)) {
panthor_group_stop(group);
@@ -3574,6 +3589,9 @@ int panthor_group_create(struct panthor_file *pfile,
return gid;
+err_erase_gid:
+ xa_erase(&gpool->xa, gid);
+
err_put_group:
group_put(group);
return ret;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index 742b0b4ff3a3..f4a475aa34c0 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -21,7 +21,8 @@ struct panthor_job;
int panthor_group_create(struct panthor_file *pfile,
const struct drm_panthor_group_create *group_args,
- const struct drm_panthor_queue_create *queue_args);
+ const struct drm_panthor_queue_create *queue_args,
+ u64 drm_client_id);
int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle);
int panthor_group_get_state(struct panthor_file *pfile,
struct drm_panthor_group_get_state *get_state);
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index b9fe926a49e8..6d567e5c7c6f 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -473,12 +473,15 @@ static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
return best_div;
}
-static long pl111_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int pl111_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int div = pl111_clk_div_choose_div(hw, rate, prate, true);
+ int div = pl111_clk_div_choose_div(hw, req->rate,
+ &req->best_parent_rate, true);
- return DIV_ROUND_UP_ULL(*prate, div);
+ req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);
+
+ return 0;
}
static unsigned long pl111_clk_div_recalc_rate(struct clk_hw *hw,
@@ -528,7 +531,7 @@ static int pl111_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops pl111_clk_div_ops = {
.recalc_rate = pl111_clk_div_recalc_rate,
- .round_rate = pl111_clk_div_round_rate,
+ .determine_rate = pl111_clk_div_determine_rate,
.set_rate = pl111_clk_div_set_rate,
};
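The .round_rate to .determine_rate conversion above recurs later in this series (dw_mipi_dsi-stm). A hedged, generic sketch of the pattern, with foo_pick_div() standing in for a driver's divider search:

static int foo_clk_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* The requested rate and the parent rate now travel in *req... */
	int div = foo_pick_div(hw, req->rate, &req->best_parent_rate);

	/* ...and the chosen rate is written back instead of being returned. */
	req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);
	return 0;
}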
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index ae7e572b1b4a..b7d0e60c0de2 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -37,6 +37,8 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -382,7 +384,25 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_pending_vblank_event *event;
+
qxl_crtc_update_monitors_config(crtc, "flush");
+
+ spin_lock_irq(&dev->event_lock);
+
+ event = crtc_state->event;
+ crtc_state->event = NULL;
+
+ if (event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+
+ spin_unlock_irq(&dev->event_lock);
}
static void qxl_crtc_destroy(struct drm_crtc *crtc)
@@ -401,6 +421,7 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
@@ -455,11 +476,15 @@ static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "enable");
+
+ drm_crtc_vblank_on(crtc);
}
static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ drm_crtc_vblank_off(crtc);
+
qxl_crtc_update_monitors_config(crtc, "disable");
}
@@ -1276,6 +1301,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
qxl_display_read_client_monitors_config(qdev);
+ ret = drm_vblank_init(&qdev->ddev, qxl_num_crtc);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(&qdev->ddev);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index fc5e3763c359..d26043424e95 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -39,7 +39,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
qxl_surface_evict(qdev, qobj, false);
tbo = &qobj->tbo;
- ttm_bo_put(tbo);
+ ttm_bo_fini(tbo);
}
int qxl_gem_object_create(struct qxl_device *qdev, int size,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 9e35b14e2bf0..60afaa8e56b4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1635,7 +1635,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
}
if (notify_clients)
- drm_client_dev_suspend(dev, false);
+ drm_client_dev_suspend(dev);
return 0;
}
@@ -1739,7 +1739,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
radeon_pm_compute_clocks(rdev);
if (notify_clients)
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f86773f3db20..18ca1bcfd2f9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -86,7 +86,7 @@ static void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_mn_unregister(robj);
- ttm_bo_put(&robj->tbo);
+ ttm_bo_fini(&robj->tbo);
}
}
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index 216219accfd9..6294443f6068 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -11,6 +11,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -407,8 +408,8 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
- unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
+ int ret;
/*
* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
@@ -419,7 +420,9 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
else
align = 16 * args->bpp / 8;
- args->pitch = roundup(min_pitch, align);
+ ret = drm_mode_size_dumb(dev, args, align, 0);
+ if (ret)
+ return ret;
return drm_gem_dma_dumb_create_internal(file, dev, args);
}
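For reference, a sketch of the open-coded sizing that drm_mode_size_dumb() replaces in these dumb_create callbacks (illustrative only; the helper's exact validation is not shown in this diff):

static void example_size_dumb(struct drm_mode_create_dumb *args,
			      unsigned int align)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->pitch = roundup(min_pitch, align);
	args->size = (u64)args->pitch * args->height;
}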
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index e57536fd6f4d..7f2ef7137ae5 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_RZG2L_DU
tristate "DRM Support for RZ/G2L Display Unit"
- depends on ARCH_RZG2L || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on DRM && OF
depends on VIDEO_RENESAS_VSP1
select DRM_CLIENT_SELECTION
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index d30f0983a53a..937f83cf42fc 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -335,15 +335,9 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
return PTR_ERR(dp->grf);
}
- dp->grfclk = devm_clk_get(dev, "grf");
- if (PTR_ERR(dp->grfclk) == -ENOENT) {
- dp->grfclk = NULL;
- } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(dp->grfclk)) {
- DRM_DEV_ERROR(dev, "failed to get grf clock\n");
- return PTR_ERR(dp->grfclk);
- }
+ dp->grfclk = devm_clk_get_optional(dev, "grf");
+ if (IS_ERR(dp->grfclk))
+ return dev_err_probe(dev, PTR_ERR(dp->grfclk), "failed to get grf clock\n");
dp->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dp->pclk)) {
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 5523911b990d..de8405ee8241 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -163,6 +163,11 @@
#define RK3288_DSI0_LCDC_SEL BIT(6)
#define RK3288_DSI1_LCDC_SEL BIT(9)
+#define RK3368_GRF_SOC_CON7 0x41c
+#define RK3368_DSI_FORCETXSTOPMODE (0xf << 7)
+#define RK3368_DSI_FORCERXMODE BIT(6)
+#define RK3368_DSI_TURNDISABLE BIT(5)
+
#define RK3399_GRF_SOC_CON20 0x6250
#define RK3399_DSI0_LCDC_SEL BIT(0)
#define RK3399_DSI1_LCDC_SEL BIT(4)
@@ -1528,6 +1533,18 @@ static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
{ /* sentinel */ }
};
+static const struct rockchip_dw_dsi_chip_data rk3368_chip_data[] = {
+ {
+ .reg = 0xff960000,
+ .lanecfg1_grf_reg = RK3368_GRF_SOC_CON7,
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3368_DSI_TURNDISABLE |
+ RK3368_DSI_FORCETXSTOPMODE |
+ RK3368_DSI_FORCERXMODE), 0),
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
+
static int rk3399_dphy_tx1rx1_init(struct phy *phy)
{
struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
@@ -1688,6 +1705,9 @@ static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
.compatible = "rockchip,rk3288-mipi-dsi",
.data = &rk3288_chip_data,
}, {
+ .compatible = "rockchip,rk3368-mipi-dsi",
+ .data = &rk3368_chip_data,
+ }, {
.compatible = "rockchip,rk3399-mipi-dsi",
.data = &rk3399_chip_data,
}, {
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index ed6e8f036f4b..931343b072ad 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -429,14 +429,15 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct dw_hdmi_qp_plat_data plat_data = {};
const struct rockchip_hdmi_qp_cfg *cfg;
- struct dw_hdmi_qp_plat_data plat_data;
struct drm_device *drm = data;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct rockchip_hdmi_qp *hdmi;
struct resource *res;
struct clk_bulk_data *clks;
+ struct clk *ref_clk;
int ret, irq, i;
if (!pdev->dev.of_node)
@@ -455,10 +456,8 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return -ENODEV;
if (!cfg->ctrl_ops || !cfg->ctrl_ops->io_init ||
- !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback) {
- dev_err(dev, "Missing platform ctrl ops\n");
- return -ENODEV;
- }
+ !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback)
+ return dev_err_probe(dev, -ENODEV, "Missing platform ctrl ops\n");
hdmi->ctrl_ops = cfg->ctrl_ops;
hdmi->dev = &pdev->dev;
@@ -471,10 +470,9 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
break;
}
}
- if (hdmi->port_id < 0) {
- dev_err(hdmi->dev, "Failed to match HDMI port ID\n");
- return hdmi->port_id;
- }
+ if (hdmi->port_id < 0)
+ return dev_err_probe(hdmi->dev, hdmi->port_id,
+ "Failed to match HDMI port ID\n");
plat_data.phy_ops = cfg->phy_ops;
plat_data.phy_data = hdmi;
@@ -495,39 +493,38 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- if (IS_ERR(hdmi->regmap)) {
- dev_err(hdmi->dev, "Unable to get rockchip,grf\n");
- return PTR_ERR(hdmi->regmap);
- }
+ if (IS_ERR(hdmi->regmap))
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->regmap),
+ "Unable to get rockchip,grf\n");
hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,vo-grf");
- if (IS_ERR(hdmi->vo_regmap)) {
- dev_err(hdmi->dev, "Unable to get rockchip,vo-grf\n");
- return PTR_ERR(hdmi->vo_regmap);
- }
+ if (IS_ERR(hdmi->vo_regmap))
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->vo_regmap),
+ "Unable to get rockchip,vo-grf\n");
ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks);
- if (ret < 0) {
- dev_err(hdmi->dev, "Failed to get clocks: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(hdmi->dev, ret, "Failed to get clocks\n");
+
+ ref_clk = clk_get(hdmi->dev, "ref");
+ if (IS_ERR(ref_clk))
+ return dev_err_probe(hdmi->dev, PTR_ERR(ref_clk),
+ "Failed to get ref clock\n");
+
+ plat_data.ref_clk_rate = clk_get_rate(ref_clk);
+ clk_put(ref_clk);
hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable",
GPIOD_OUT_HIGH);
- if (IS_ERR(hdmi->enable_gpio)) {
- ret = PTR_ERR(hdmi->enable_gpio);
- dev_err(hdmi->dev, "Failed to request enable GPIO: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hdmi->enable_gpio))
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->enable_gpio),
+ "Failed to request enable GPIO\n");
hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
- if (IS_ERR(hdmi->phy)) {
- ret = PTR_ERR(hdmi->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get phy: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hdmi->phy))
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->phy),
+ "Failed to get phy\n");
cfg->ctrl_ops->io_init(hdmi);
@@ -537,6 +534,10 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
if (plat_data.main_irq < 0)
return plat_data.main_irq;
+ plat_data.cec_irq = platform_get_irq_byname(pdev, "cec");
+ if (plat_data.cec_irq < 0)
+ return plat_data.cec_irq;
+
irq = platform_get_irq_byname(pdev, "hpd");
if (irq < 0)
return irq;
@@ -556,17 +557,15 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
hdmi->hdmi = dw_hdmi_qp_bind(pdev, encoder, &plat_data);
if (IS_ERR(hdmi->hdmi)) {
- ret = PTR_ERR(hdmi->hdmi);
drm_encoder_cleanup(encoder);
- return ret;
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->hdmi),
+ "Failed to bind dw-hdmi-qp");
}
connector = drm_bridge_connector_init(drm, encoder);
- if (IS_ERR(connector)) {
- ret = PTR_ERR(connector);
- dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(connector))
+ return dev_err_probe(hdmi->dev, PTR_ERR(connector),
+ "Failed to init bridge connector\n");
return drm_connector_attach_encoder(connector, encoder);
}
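The bind function above repeatedly collapses the if/dev_err()/return triplet into dev_err_probe(), which also stays quiet on -EPROBE_DEFER. A hedged sketch of that pattern with an illustrative helper name:

static int example_get_ref_clk(struct device *dev, unsigned long *rate)
{
	struct clk *clk = devm_clk_get(dev, "ref");

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to get ref clock\n");

	*rate = clk_get_rate(clk);
	return 0;
}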
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 6330b883efc3..3bd06202e232 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -9,6 +9,7 @@
#include <linux/vmalloc.h>
#include <drm/drm.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
@@ -403,13 +404,12 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct rockchip_gem_object *rk_obj;
- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ int ret;
- /*
- * align to 64 bytes since Mali requires it.
- */
- args->pitch = ALIGN(min_pitch, 64);
- args->size = args->pitch * args->height;
+ /* 64-byte alignment required by Mali */
+ ret = drm_mode_size_dumb(dev, args, SZ_64, 0);
+ if (ret)
+ return ret;
rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
&args->handle);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index ba6b0528d1e5..5369b77ea434 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -826,8 +826,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -1092,7 +1091,8 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
- crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
/* Special case for asynchronous cursor updates. */
if (!crtc_state)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 7ec7bea5e38e..284c8a048034 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -1003,6 +1003,8 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
struct drm_rect *src = &pstate->src;
int min_scale = FRAC_16_16(1, 8);
int max_scale = FRAC_16_16(8, 1);
+ int src_x, src_w, src_h;
+ int dest_w, dest_h;
int format;
int ret;
@@ -1013,7 +1015,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
vop2 = vp->vop2;
vop2_data = vop2->data;
- cstate = drm_atomic_get_existing_crtc_state(pstate->state, crtc);
+ cstate = drm_atomic_get_new_crtc_state(pstate->state, crtc);
if (WARN_ON(!cstate))
return -EINVAL;
@@ -1030,22 +1032,25 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
if (format < 0)
return format;
- if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 ||
- drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) {
- drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n",
- drm_rect_width(src) >> 16, drm_rect_height(src) >> 16,
- drm_rect_width(dest), drm_rect_height(dest));
- pstate->visible = false;
- return 0;
+ /* Co-ordinates have now been clipped */
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_h = drm_rect_height(src) >> 16;
+ dest_w = drm_rect_width(dest);
+ dest_h = drm_rect_height(dest);
+
+ if (src_w < 4 || src_h < 4 || dest_w < 4 || dest_h < 4) {
+ drm_dbg_kms(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n",
+ src_w, src_h, dest_w, dest_h);
+ return -EINVAL;
}
- if (drm_rect_width(src) >> 16 > vop2_data->max_input.width ||
- drm_rect_height(src) >> 16 > vop2_data->max_input.height) {
- drm_err(vop2->drm, "Invalid source: %dx%d. max input: %dx%d\n",
- drm_rect_width(src) >> 16,
- drm_rect_height(src) >> 16,
- vop2_data->max_input.width,
- vop2_data->max_input.height);
+ if (src_w > vop2_data->max_input.width ||
+ src_h > vop2_data->max_input.height) {
+ drm_dbg_kms(vop2->drm, "Invalid source: %dx%d. max input: %dx%d\n",
+ src_w, src_h,
+ vop2_data->max_input.width,
+ vop2_data->max_input.height);
return -EINVAL;
}
@@ -1053,8 +1058,8 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd when do clip, but yuv plane start point
* need align with 2 pixel.
*/
- if (fb->format->is_yuv && ((pstate->src.x1 >> 16) % 2)) {
- drm_err(vop2->drm, "Invalid Source: Yuv format not support odd xpos\n");
+ if (fb->format->is_yuv && src_x % 2) {
+ drm_dbg_kms(vop2->drm, "Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
}
@@ -1140,7 +1145,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
struct vop2 *vop2 = win->vop2;
struct drm_framebuffer *fb = pstate->fb;
u32 bpp = vop2_get_bpp(fb->format);
- u32 actual_w, actual_h, dsp_w, dsp_h;
+ u32 src_w, src_h, dsp_w, dsp_h;
u32 act_info, dsp_info;
u32 format;
u32 afbc_format;
@@ -1204,8 +1209,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
uv_mst = rk_obj->dma_addr + offset + fb->offsets[1];
}
- actual_w = drm_rect_width(src) >> 16;
- actual_h = drm_rect_height(src) >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_h = drm_rect_height(src) >> 16;
dsp_w = drm_rect_width(dest);
if (dest->x1 + dsp_w > adjusted_mode->hdisplay) {
@@ -1215,7 +1220,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_w = adjusted_mode->hdisplay - dest->x1;
if (dsp_w < 4)
dsp_w = 4;
- actual_w = dsp_w * actual_w / drm_rect_width(dest);
+ src_w = dsp_w * src_w / drm_rect_width(dest);
}
dsp_h = drm_rect_height(dest);
@@ -1227,35 +1232,35 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_h = adjusted_mode->vdisplay - dest->y1;
if (dsp_h < 4)
dsp_h = 4;
- actual_h = dsp_h * actual_h / drm_rect_height(dest);
+ src_h = dsp_h * src_h / drm_rect_height(dest);
}
/*
* This is workaround solution for IC design:
- * esmart can't support scale down when actual_w % 16 == 1.
+ * esmart can't support scale down when src_w % 16 == 1.
*/
if (!(win->data->feature & WIN_FEATURE_AFBDC)) {
- if (actual_w > dsp_w && (actual_w & 0xf) == 1) {
+ if (src_w > dsp_w && (src_w & 0xf) == 1) {
drm_dbg_kms(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n",
- vp->id, win->data->name, actual_w);
- actual_w -= 1;
+ vp->id, win->data->name, src_w);
+ src_w -= 1;
}
}
- if (afbc_en && actual_w % 4) {
- drm_dbg_kms(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n",
- vp->id, win->data->name, actual_w);
- actual_w = ALIGN_DOWN(actual_w, 4);
+ if (afbc_en && src_w % 4) {
+ drm_dbg_kms(vop2->drm, "vp%d %s src_w[%d] not 4 pixel aligned\n",
+ vp->id, win->data->name, src_w);
+ src_w = ALIGN_DOWN(src_w, 4);
}
- act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);
+ act_info = (src_h - 1) << 16 | ((src_w - 1) & 0xffff);
dsp_info = (dsp_h - 1) << 16 | ((dsp_w - 1) & 0xffff);
format = vop2_convert_format(fb->format->format);
half_block_en = vop2_half_block_enable(pstate);
drm_dbg(vop2->drm, "vp%d update %s[%dx%d->%dx%d@%dx%d] fmt[%p4cc_%s] addr[%pad]\n",
- vp->id, win->data->name, actual_w, actual_h, dsp_w, dsp_h,
+ vp->id, win->data->name, src_w, src_h, dsp_w, dsp_h,
dest->x1, dest->y1,
&fb->format->format,
afbc_en ? "AFBC" : "", &yrgb_mst);
@@ -1284,7 +1289,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
if (fb->modifier & AFBC_FORMAT_MOD_YTR)
afbc_format |= (1 << 4);
- afbc_tile_num = ALIGN(actual_w, block_w) / block_w;
+ afbc_tile_num = ALIGN(src_w, block_w) / block_w;
/*
* AFBC pic_vir_width is count by pixel, this is different
@@ -1362,8 +1367,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
if (rotate_90 || rotate_270) {
act_info = swahw32(act_info);
- actual_w = drm_rect_height(src) >> 16;
- actual_h = drm_rect_width(src) >> 16;
+ src_w = drm_rect_height(src) >> 16;
+ src_h = drm_rect_width(src) >> 16;
}
vop2_win_write(win, VOP2_WIN_FORMAT, format);
@@ -1379,7 +1384,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
vop2_win_write(win, VOP2_WIN_UV_MST, uv_mst);
}
- vop2_setup_scale(vop2, win, actual_w, actual_h, dsp_w, dsp_h, fb->format->format);
+ vop2_setup_scale(vop2, win, src_w, src_h, dsp_w, dsp_h, fb->format->format);
if (!vop2_cluster_window(win))
vop2_plane_setup_color_key(plane, 0);
vop2_win_write(win, VOP2_WIN_ACT_INFO, act_info);
@@ -2647,6 +2652,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vop2->map))
return PTR_ERR(vop2->map);
+ /* Set the bounds for framebuffer creation */
+ drm->mode_config.min_width = 4;
+ drm->mode_config.min_height = 4;
+ drm->mode_config.max_width = vop2_data->max_input.width;
+ drm->mode_config.max_height = vop2_data->max_input.height;
+
ret = vop2_win_init(vop2);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index d1f788763318..219f8c2fa88e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -880,6 +880,7 @@ static const struct vop_data rk3368_vop = {
.win = rk3368_vop_win_data,
.win_size = ARRAY_SIZE(rk3368_vop_win_data),
.max_output = { 4096, 2160 },
+ .lut_size = 1024,
};
static const struct vop_intr rk3366_vop_intr = {
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index 7f31d35780cc..553d45abd057 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -31,9 +31,8 @@
*
* @base: DRM scheduler base class
* @test: Backpointer to owning the kunit test case
- * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list
+ * @lock: Lock to protect the simulated @hw_timeline and @job_list
* @job_list: List of jobs submitted to the mock GPU
- * @done_list: List of jobs completed by the mock GPU
* @hw_timeline: Simulated hardware timeline has a @context, @next_seqno and
* @cur_seqno for implementing a struct dma_fence signaling the
* simulated job completion.
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
index a6c4a6738ded..32b91d65b768 100644
--- a/drivers/gpu/drm/sitronix/st7571-i2c.c
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -263,6 +263,7 @@ static int st7571_fb_clear_screen(struct st7571_device *st7571)
u32 npixels = st7571->ncols * round_up(st7571->nlines, ST7571_PAGE_HEIGHT) * st7571->bpp;
char pixelvalue = 0x00;
+ st7571_set_position(st7571, 0, 0);
for (int i = 0; i < npixels; i++)
regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, &pixelvalue, 1);
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index eec43d1a5595..7e2e69ce890f 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -1016,15 +1016,9 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return ret;
-
iosys_map_set_vaddr(&dst, buf);
drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-
ssd130x_update_rect(ssd130x, rect, buf, data_array);
return ret;
@@ -1048,15 +1042,9 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
dst_pitch = drm_rect_width(rect);
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return ret;
-
iosys_map_set_vaddr(&dst, buf);
drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-
ssd132x_update_rect(ssd130x, rect, buf, data_array);
return ret;
@@ -1078,15 +1066,9 @@ static int ssd133x_fb_blit_rect(struct drm_framebuffer *fb,
dst_pitch = drm_format_info_min_pitch(fi, 0, drm_rect_width(rect));
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return ret;
-
iosys_map_set_vaddr(&dst, data_array);
drm_fb_xrgb8888_to_rgb332(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-
ssd133x_update_rect(ssd130x, rect, data_array, dst_pitch);
return ret;
@@ -1232,6 +1214,9 @@ static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(drm, &idx))
return;
+ if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE))
+ goto out_drm_dev_exit;
+
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
dst_clip = plane_state->dst;
@@ -1245,6 +1230,9 @@ static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane,
&shadow_plane_state->fmtcnv_state);
}
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+out_drm_dev_exit:
drm_dev_exit(idx);
}
@@ -1267,6 +1255,9 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(drm, &idx))
return;
+ if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE))
+ goto out_drm_dev_exit;
+
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
dst_clip = plane_state->dst;
@@ -1280,6 +1271,9 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane,
&shadow_plane_state->fmtcnv_state);
}
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+out_drm_dev_exit:
drm_dev_exit(idx);
}
@@ -1301,6 +1295,9 @@ static void ssd133x_primary_plane_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(drm, &idx))
return;
+ if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE))
+ goto out_drm_dev_exit;
+
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
dst_clip = plane_state->dst;
@@ -1313,6 +1310,9 @@ static void ssd133x_primary_plane_atomic_update(struct drm_plane *plane,
&shadow_plane_state->fmtcnv_state);
}
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+out_drm_dev_exit:
drm_dev_exit(idx);
}
@@ -1393,7 +1393,7 @@ static void ssd130x_primary_plane_reset(struct drm_plane *plane)
{
struct ssd130x_plane_state *ssd130x_state;
- WARN_ON(plane->state);
+ drm_WARN_ON_ONCE(plane->dev, plane->state);
ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL);
if (!ssd130x_state)
@@ -1408,7 +1408,7 @@ static struct drm_plane_state *ssd130x_primary_plane_duplicate_state(struct drm_
struct ssd130x_plane_state *old_ssd130x_state;
struct ssd130x_plane_state *ssd130x_state;
- if (WARN_ON(!plane->state))
+ if (drm_WARN_ON_ONCE(plane->dev, !plane->state))
return NULL;
old_ssd130x_state = to_ssd130x_plane_state(plane->state);
@@ -1473,15 +1473,7 @@ static enum drm_mode_status ssd130x_crtc_mode_valid(struct drm_crtc *crtc,
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(crtc->dev);
- if (mode->hdisplay != ssd130x->mode.hdisplay &&
- mode->vdisplay != ssd130x->mode.vdisplay)
- return MODE_ONE_SIZE;
- else if (mode->hdisplay != ssd130x->mode.hdisplay)
- return MODE_ONE_WIDTH;
- else if (mode->vdisplay != ssd130x->mode.vdisplay)
- return MODE_ONE_HEIGHT;
-
- return MODE_OK;
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &ssd130x->mode);
}
static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc,
@@ -1498,7 +1490,7 @@ static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
- ssd130x_state->data_array = kmalloc(ssd130x->width * pages, GFP_KERNEL);
+ ssd130x_state->data_array = kmalloc_array(ssd130x->width, pages, GFP_KERNEL);
if (!ssd130x_state->data_array)
return -ENOMEM;
@@ -1519,7 +1511,7 @@ static int ssd132x_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
- ssd130x_state->data_array = kmalloc(columns * ssd130x->height, GFP_KERNEL);
+ ssd130x_state->data_array = kmalloc_array(columns, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->data_array)
return -ENOMEM;
@@ -1546,7 +1538,7 @@ static int ssd133x_crtc_atomic_check(struct drm_crtc *crtc,
pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
- ssd130x_state->data_array = kmalloc(pitch * ssd130x->height, GFP_KERNEL);
+ ssd130x_state->data_array = kmalloc_array(pitch, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->data_array)
return -ENOMEM;
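The three ssd130x/ssd132x/ssd133x hunks above follow the same pattern: an open-coded size multiplication passed to kmalloc() is replaced by kmalloc_array(), which checks the multiplication for overflow before allocating. A minimal sketch of the idiom, with placeholder names that are not part of the patch:

	/* Illustrative only: buf, nmemb and size are placeholders. */
	buf = kmalloc_array(nmemb, size, GFP_KERNEL);	/* fails cleanly if nmemb * size would overflow */
	if (!buf)
		return -ENOMEM;
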
@@ -1558,7 +1550,7 @@ static void ssd130x_crtc_reset(struct drm_crtc *crtc)
{
struct ssd130x_crtc_state *ssd130x_state;
- WARN_ON(crtc->state);
+ drm_WARN_ON_ONCE(crtc->dev, crtc->state);
ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL);
if (!ssd130x_state)
@@ -1572,7 +1564,7 @@ static struct drm_crtc_state *ssd130x_crtc_duplicate_state(struct drm_crtc *crtc
struct ssd130x_crtc_state *old_ssd130x_state;
struct ssd130x_crtc_state *ssd130x_state;
- if (WARN_ON(!crtc->state))
+ if (drm_WARN_ON_ONCE(crtc->dev, !crtc->state))
return NULL;
old_ssd130x_state = to_ssd130x_crtc_state(crtc->state);
@@ -1740,20 +1732,8 @@ static const struct drm_encoder_funcs ssd130x_encoder_funcs = {
static int ssd130x_connector_get_modes(struct drm_connector *connector)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev);
- struct drm_display_mode *mode;
- struct device *dev = ssd130x->dev;
-
- mode = drm_mode_duplicate(connector->dev, &ssd130x->mode);
- if (!mode) {
- dev_err(dev, "Failed to duplicated mode\n");
- return 0;
- }
-
- drm_mode_probed_add(connector, mode);
- drm_set_preferred_mode(connector, mode->hdisplay, mode->vdisplay);
- /* There is only a single mode */
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, &ssd130x->mode);
}
static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = {
@@ -1887,10 +1867,14 @@ static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
mode->type = DRM_MODE_TYPE_DRIVER;
mode->clock = 1;
- mode->hdisplay = mode->htotal = ssd130x->width;
- mode->hsync_start = mode->hsync_end = ssd130x->width;
- mode->vdisplay = mode->vtotal = ssd130x->height;
- mode->vsync_start = mode->vsync_end = ssd130x->height;
+ mode->hdisplay = ssd130x->width;
+ mode->htotal = ssd130x->width;
+ mode->hsync_start = ssd130x->width;
+ mode->hsync_end = ssd130x->width;
+ mode->vdisplay = ssd130x->height;
+ mode->vtotal = ssd130x->height;
+ mode->vsync_start = ssd130x->height;
+ mode->vsync_end = ssd130x->height;
mode->width_mm = 27;
mode->height_mm = 27;
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 2c7bc064bc66..58eae6804cc8 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -274,8 +274,8 @@ static unsigned long dw_mipi_dsi_clk_recalc_rate(struct clk_hw *hw,
return (unsigned long)pll_out_khz * 1000;
}
-static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int dw_mipi_dsi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dw_mipi_dsi_stm *dsi = clk_to_dw_mipi_dsi_stm(hw);
unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz;
@@ -283,14 +283,14 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
DRM_DEBUG_DRIVER("\n");
- pll_in_khz = (unsigned int)(*parent_rate / 1000);
+ pll_in_khz = (unsigned int)(req->best_parent_rate / 1000);
/* Compute best pll parameters */
idf = 0;
ndiv = 0;
odf = 0;
- ret = dsi_pll_get_params(dsi, pll_in_khz, rate / 1000,
+ ret = dsi_pll_get_params(dsi, pll_in_khz, req->rate / 1000,
&idf, &ndiv, &odf);
if (ret)
DRM_WARN("Warning dsi_pll_get_params(): bad params\n");
@@ -298,7 +298,9 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
/* Get the adjusted pll out value */
pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf);
- return pll_out_khz * 1000;
+ req->rate = pll_out_khz * 1000;
+
+ return 0;
}
static int dw_mipi_dsi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -351,7 +353,7 @@ static const struct clk_ops dw_mipi_dsi_stm_clk_ops = {
.disable = dw_mipi_dsi_clk_disable,
.is_enabled = dw_mipi_dsi_clk_is_enabled,
.recalc_rate = dw_mipi_dsi_clk_recalc_rate,
- .round_rate = dw_mipi_dsi_clk_round_rate,
+ .determine_rate = dw_mipi_dsi_clk_determine_rate,
.set_rate = dw_mipi_dsi_clk_set_rate,
};
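This hunk, and the stm/lvds.c and sun4i hunks that follow, all make the same clk_ops conversion: the legacy .round_rate callback, which returned the rounded rate, becomes .determine_rate, which fills in a struct clk_rate_request and returns 0 or a negative error. A generic sketch of the mapping (the foo_* names are placeholders, not part of the patch):

static int foo_clk_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* what .round_rate used to receive as "rate" and "*parent_rate" */
	unsigned long rate = req->rate;
	unsigned long parent_rate = req->best_parent_rate;

	/* compute the achievable rate exactly as .round_rate did */
	req->rate = foo_round_to_hw_capabilities(rate, parent_rate);

	return 0;
}

Callbacks that also adjust the parent rate write the new value back through req->best_parent_rate, as the sun4i_dclk hunk below does.
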
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 07788e8d3d83..fe38c0984b2b 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -682,8 +682,8 @@ static unsigned long lvds_pixel_clk_recalc_rate(struct clk_hw *hw,
return (unsigned long)lvds->pixel_clock_rate;
}
-static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int lvds_pixel_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_lvds *lvds = container_of(hw, struct stm_lvds, lvds_ck_px);
unsigned int pll_in_khz, bdiv = 0, mdiv = 0, ndiv = 0;
@@ -703,7 +703,7 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate,
mode = list_first_entry(&connector->modes,
struct drm_display_mode, head);
- pll_in_khz = (unsigned int)(*parent_rate / 1000);
+ pll_in_khz = (unsigned int)(req->best_parent_rate / 1000);
if (lvds_is_dual_link(lvds->link_type))
multiplier = 2;
@@ -719,14 +719,16 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate,
lvds->pixel_clock_rate = (unsigned long)pll_get_clkout_khz(pll_in_khz, bdiv, mdiv, ndiv)
* 1000 * multiplier / 7;
- return lvds->pixel_clock_rate;
+ req->rate = lvds->pixel_clock_rate;
+
+ return 0;
}
static const struct clk_ops lvds_pixel_clk_ops = {
.enable = lvds_pixel_clk_enable,
.disable = lvds_pixel_clk_disable,
.recalc_rate = lvds_pixel_clk_recalc_rate,
- .round_rate = lvds_pixel_clk_round_rate,
+ .determine_rate = lvds_pixel_clk_determine_rate,
};
static const struct clk_init_data clk_data = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 12430b9d4e93..b1beadb9bb59 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -59,13 +59,15 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
return best_rate;
}
-static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sun4i_ddc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sun4i_ddc *ddc = hw_to_ddc(hw);
- return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div,
- ddc->m_offset, NULL, NULL);
+ req->rate = sun4i_ddc_calc_divider(req->rate, req->best_parent_rate,
+ ddc->pre_div, ddc->m_offset, NULL, NULL);
+
+ return 0;
}
static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw,
@@ -101,7 +103,7 @@ static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops sun4i_ddc_ops = {
.recalc_rate = sun4i_ddc_recalc_rate,
- .round_rate = sun4i_ddc_round_rate,
+ .determine_rate = sun4i_ddc_determine_rate,
.set_rate = sun4i_ddc_set_rate,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
index 03d7de1911cd..4afb12bd5281 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
@@ -67,8 +67,8 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
return parent_rate / val;
}
-static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sun4i_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
struct sun4i_tcon *tcon = dclk->tcon;
@@ -77,7 +77,7 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
int i;
for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
- u64 ideal = (u64)rate * i;
+ u64 ideal = (u64)req->rate * i;
unsigned long rounded;
/*
@@ -99,17 +99,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
goto out;
}
- if (abs(rate - rounded / i) <
- abs(rate - best_parent / best_div)) {
+ if (abs(req->rate - rounded / i) <
+ abs(req->rate - best_parent / best_div)) {
best_parent = rounded;
best_div = i;
}
}
out:
- *parent_rate = best_parent;
+ req->best_parent_rate = best_parent;
- return best_parent / best_div;
+ req->rate = best_parent / best_div;
+
+ return 0;
}
static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -155,7 +157,7 @@ static const struct clk_ops sun4i_dclk_ops = {
.is_enabled = sun4i_dclk_is_enabled,
.recalc_rate = sun4i_dclk_recalc_rate,
- .round_rate = sun4i_dclk_round_rate,
+ .determine_rate = sun4i_dclk_determine_rate,
.set_rate = sun4i_dclk_set_rate,
.get_phase = sun4i_dclk_get_phase,
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index f97be0040aab..94ac6ad6f306 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -206,8 +206,7 @@ static int sun8i_ui_layer_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index a09ee4097537..1f77e1d29845 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -327,8 +327,7 @@ static int sun8i_vi_layer_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
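These sun8i hunks, and the matching tegra, tilcdc and vboxvideo hunks later in the series, swap drm_atomic_get_existing_crtc_state() for drm_atomic_get_new_crtc_state(). In the check phase both return the CRTC state tracked in the current commit, but the new-state accessor names the intent directly and pairs with drm_atomic_get_old_crtc_state(). A hedged sketch of the resulting atomic_check pattern (foo_* names are illustrative):

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	/* the CRTC state must already be part of this commit */
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   false, true);
}
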
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index 89633e30ca62..da670d7eeb2e 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -10,12 +10,19 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modes.h>
struct drm_format_info;
struct drm_scanout_buffer;
struct screen_info;
+typedef void (*drm_sysfb_blit_func)(struct iosys_map *, const unsigned int *,
+ const struct iosys_map *,
+ const struct drm_framebuffer *,
+ const struct drm_rect *,
+ struct drm_format_conv_state *);
+
/*
* Input parsing
*/
@@ -93,10 +100,25 @@ static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *de
* Plane
*/
+struct drm_sysfb_plane_state {
+ struct drm_shadow_plane_state base;
+
+ /* transfers framebuffer data to scanout buffer in CRTC format */
+ drm_sysfb_blit_func blit_to_crtc;
+};
+
+static inline struct drm_sysfb_plane_state *
+to_drm_sysfb_plane_state(struct drm_plane_state *base)
+{
+ return container_of(to_drm_shadow_plane_state(base), struct drm_sysfb_plane_state, base);
+}
+
size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
u32 *fourccs_out, size_t nfourccs_out);
+int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state);
void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
@@ -114,16 +136,24 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
DRM_FORMAT_MOD_INVALID
#define DRM_SYSFB_PLANE_HELPER_FUNCS \
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access, \
+ .end_fb_access = drm_gem_end_shadow_fb_access, \
.atomic_check = drm_sysfb_plane_helper_atomic_check, \
.atomic_update = drm_sysfb_plane_helper_atomic_update, \
.atomic_disable = drm_sysfb_plane_helper_atomic_disable, \
.get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer
+void drm_sysfb_plane_reset(struct drm_plane *plane);
+struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane);
+void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
+
#define DRM_SYSFB_PLANE_FUNCS \
+ .reset = drm_sysfb_plane_reset, \
.update_plane = drm_atomic_helper_update_plane, \
.disable_plane = drm_atomic_helper_disable_plane, \
- DRM_GEM_SHADOW_PLANE_FUNCS
+ .atomic_duplicate_state = drm_sysfb_plane_atomic_duplicate_state, \
+ .atomic_destroy_state = drm_sysfb_plane_atomic_destroy_state
/*
* CRTC
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
index ddb4a7523ee6..8517c490e815 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -11,7 +11,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
@@ -185,6 +184,104 @@ size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_sysfb_build_fourcc_list);
+static void drm_sysfb_plane_state_destroy(struct drm_sysfb_plane_state *sysfb_plane_state)
+{
+ __drm_gem_destroy_shadow_plane_state(&sysfb_plane_state->base);
+
+ kfree(sysfb_plane_state);
+}
+
+static void drm_sysfb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
+}
+
+static drm_sysfb_blit_func drm_sysfb_get_blit_func(u32 dst_format, u32 src_format)
+{
+ if (src_format == dst_format) {
+ return drm_sysfb_memcpy;
+ } else if (src_format == DRM_FORMAT_XRGB8888) {
+ switch (dst_format) {
+ case DRM_FORMAT_RGB565:
+ return drm_fb_xrgb8888_to_rgb565;
+ case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
+ return drm_fb_xrgb8888_to_rgb565be;
+ case DRM_FORMAT_XRGB1555:
+ return drm_fb_xrgb8888_to_xrgb1555;
+ case DRM_FORMAT_ARGB1555:
+ return drm_fb_xrgb8888_to_argb1555;
+ case DRM_FORMAT_RGBA5551:
+ return drm_fb_xrgb8888_to_rgba5551;
+ case DRM_FORMAT_RGB888:
+ return drm_fb_xrgb8888_to_rgb888;
+ case DRM_FORMAT_BGR888:
+ return drm_fb_xrgb8888_to_bgr888;
+ case DRM_FORMAT_ARGB8888:
+ return drm_fb_xrgb8888_to_argb8888;
+ case DRM_FORMAT_XBGR8888:
+ return drm_fb_xrgb8888_to_xbgr8888;
+ case DRM_FORMAT_ABGR8888:
+ return drm_fb_xrgb8888_to_abgr8888;
+ case DRM_FORMAT_XRGB2101010:
+ return drm_fb_xrgb8888_to_xrgb2101010;
+ case DRM_FORMAT_ARGB2101010:
+ return drm_fb_xrgb8888_to_argb2101010;
+ case DRM_FORMAT_BGRX8888:
+ return drm_fb_xrgb8888_to_bgrx8888;
+ case DRM_FORMAT_RGB332:
+ return drm_fb_xrgb8888_to_rgb332;
+ }
+ }
+
+ return NULL;
+}
+
+int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc_state *crtc_state;
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+ drm_sysfb_blit_func blit_to_crtc;
+ int ret;
+
+ ret = drm_gem_begin_shadow_fb_access(plane, plane_state);
+ if (ret)
+ return ret;
+
+ if (!fb)
+ return 0;
+
+ ret = -EINVAL;
+
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc);
+ if (drm_WARN_ON_ONCE(dev, !crtc_state))
+ goto err_drm_gem_end_shadow_fb_access;
+ sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ if (drm_WARN_ON_ONCE(dev, !sysfb_crtc_state->format))
+ goto err_drm_gem_end_shadow_fb_access;
+ blit_to_crtc = drm_sysfb_get_blit_func(sysfb_crtc_state->format->format,
+ fb->format->format);
+ if (!blit_to_crtc) {
+ drm_warn_once(dev, "No blit helper from %p4cc to %p4cc found.\n",
+ &fb->format->format, &sysfb_crtc_state->format->format);
+ goto err_drm_gem_end_shadow_fb_access;
+ }
+ sysfb_plane_state->blit_to_crtc = blit_to_crtc;
+
+ return 0;
+
+err_drm_gem_end_shadow_fb_access:
+ drm_gem_end_shadow_fb_access(plane, plane_state);
+ return ret;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_begin_fb_access);
+
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state)
{
@@ -235,12 +332,14 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at
struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state);
+ struct drm_shadow_plane_state *shadow_plane_state = &sysfb_plane_state->base;
struct drm_framebuffer *fb = plane_state->fb;
unsigned int dst_pitch = sysfb->fb_pitch;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
const struct drm_format_info *dst_format = sysfb_crtc_state->format;
+ drm_sysfb_blit_func blit_to_crtc = sysfb_plane_state->blit_to_crtc;
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
int ret, idx;
@@ -261,8 +360,8 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at
continue;
iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
- drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
+ blit_to_crtc(&dst, &dst_pitch, shadow_plane_state->data, fb, &damage,
+ &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
@@ -321,6 +420,52 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
}
EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer);
+void drm_sysfb_plane_reset(struct drm_plane *plane)
+{
+ struct drm_sysfb_plane_state *sysfb_plane_state;
+
+ if (plane->state)
+ drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane->state));
+
+ sysfb_plane_state = kzalloc(sizeof(*sysfb_plane_state), GFP_KERNEL);
+ if (sysfb_plane_state)
+ __drm_gem_reset_shadow_plane(plane, &sysfb_plane_state->base);
+ else
+ __drm_gem_reset_shadow_plane(plane, NULL);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_reset);
+
+struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_sysfb_plane_state *sysfb_plane_state;
+ struct drm_sysfb_plane_state *new_sysfb_plane_state;
+ struct drm_shadow_plane_state *new_shadow_plane_state;
+
+ if (drm_WARN_ON(dev, !plane_state))
+ return NULL;
+ sysfb_plane_state = to_drm_sysfb_plane_state(plane_state);
+
+ new_sysfb_plane_state = kzalloc(sizeof(*new_sysfb_plane_state), GFP_KERNEL);
+ if (!new_sysfb_plane_state)
+ return NULL;
+ new_shadow_plane_state = &new_sysfb_plane_state->base;
+
+ __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
+ new_sysfb_plane_state->blit_to_crtc = sysfb_plane_state->blit_to_crtc;
+
+ return &new_shadow_plane_state->base;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_atomic_duplicate_state);
+
+void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane_state));
+}
+EXPORT_SYMBOL(drm_sysfb_plane_atomic_destroy_state);
+
/*
* CRTC
*/
diff --git a/drivers/gpu/drm/sysfb/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index 0358164a623c..9b16d5164ef4 100644
--- a/drivers/gpu/drm/sysfb/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -2,8 +2,9 @@
#include <linux/aperture.h>
#include <linux/clk.h>
-#include <linux/of_clk.h>
#include <linux/minmax.h>
+#include <linux/of_address.h>
+#include <linux/of_clk.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
index 16a4b52d45c6..c318df0adad5 100644
--- a/drivers/gpu/drm/sysfb/vesadrm.c
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -295,7 +295,8 @@ static int vesadrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access,
+ .end_fb_access = drm_gem_end_shadow_fb_access,
.atomic_check = vesadrm_primary_plane_helper_atomic_check,
.atomic_update = drm_sysfb_plane_helper_atomic_update,
.atomic_disable = drm_sysfb_plane_helper_atomic_disable,
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 59d5c1ba145a..0f80da3544c9 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1033,7 +1033,7 @@ static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_a
int min_scale, max_scale;
int err;
- crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 8ede07fb7a21..6b14f1e919eb 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -16,6 +16,7 @@
#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
#include "drm.h"
@@ -542,12 +543,13 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;
+ int ret;
- args->pitch = round_up(min_pitch, tegra->pitch_align);
- args->size = args->pitch * args->height;
+ ret = drm_mode_size_dumb(drm, args, tegra->pitch_align, 0);
+ if (ret)
+ return ret;
bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
&args->handle);
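The open-coded pitch/size computation is replaced by the shared drm_mode_size_dumb() helper, which derives args->pitch and args->size from the requested width, height and bpp while honouring the driver's pitch alignment. A minimal sketch of a dumb_create callback built on it (foo_* names and the alignment value are illustrative; the final 0 simply mirrors the tegra call above):

static int foo_dumb_create(struct drm_file *file, struct drm_device *drm,
			   struct drm_mode_create_dumb *args)
{
	int ret;

	/* fills args->pitch and args->size; 64 is a made-up pitch alignment */
	ret = drm_mode_size_dumb(drm, args, 64, 0);
	if (ret)
		return ret;

	return foo_bo_create_with_handle(file, drm, args->size, &args->handle);
}
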
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 8cd2969e7d4b..c4820f5e7658 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -658,7 +658,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
{
const u8 *ptr = data;
unsigned long offset;
- size_t i, j;
+ size_t i;
u32 value;
switch (ptr[0]) {
@@ -691,7 +691,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
- for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ for (i = 3; i < size; i += 7) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
value = tegra_hdmi_subpack(&ptr[i], num);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 21f3dfdcc5c9..bc7dd562cf6b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -1864,7 +1864,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
{
const u8 *ptr = data;
unsigned long offset;
- size_t i, j;
+ size_t i;
u32 value;
switch (ptr[0]) {
@@ -1897,7 +1897,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
- for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ for (i = 3; i < size; i += 7) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
value = tegra_sor_hdmi_subpack(&ptr[i], num);
diff --git a/drivers/gpu/drm/tests/.kunitconfig b/drivers/gpu/drm/tests/.kunitconfig
index 6ec04b4c979d..5be8e71f45d5 100644
--- a/drivers/gpu/drm/tests/.kunitconfig
+++ b/drivers/gpu/drm/tests/.kunitconfig
@@ -1,3 +1,5 @@
CONFIG_KUNIT=y
CONFIG_DRM=y
+CONFIG_DRM_VKMS=y
+CONFIG_DRM_FBDEV_EMULATION=y
CONFIG_DRM_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 7a0e523651f0..5f40b5343bd8 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -21,6 +21,110 @@ static inline u64 get_size(int order, u64 chunk_size)
return (1 << order) * chunk_size;
}
+static void drm_test_buddy_fragmentation_performance(struct kunit *test)
+{
+ struct drm_buddy_block *block, *tmp;
+ int num_blocks, i, ret, count = 0;
+ LIST_HEAD(allocated_blocks);
+ unsigned long elapsed_ms;
+ LIST_HEAD(reverse_list);
+ LIST_HEAD(test_blocks);
+ LIST_HEAD(clear_list);
+ LIST_HEAD(dirty_list);
+ LIST_HEAD(free_list);
+ struct drm_buddy mm;
+ u64 mm_size = SZ_4G;
+ ktime_t start, end;
+
+ /*
+ * Allocation under severe fragmentation
+ *
+ * Create severe fragmentation by allocating the entire 4 GiB address space
+ * as tiny 8 KiB blocks but forcing a 64 KiB alignment. The resulting pattern
+ * leaves many scattered holes. Split the allocations into two groups and
+ * return them with different flags to block coalescing, then repeatedly
+ * allocate and free 64 KiB blocks while timing the loop. This stresses how
+ * quickly the allocator can satisfy larger, aligned requests from a pool of
+ * highly fragmented space.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
+ "buddy_init failed\n");
+
+ num_blocks = mm_size / SZ_64K;
+
+ start = ktime_get();
+ /* Allocate with maximum fragmentation - 8K blocks with 64K alignment */
+ for (i = 0; i < num_blocks; i++)
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K,
+ &allocated_blocks, 0),
+ "buddy_alloc hit an error size=%u\n", SZ_8K);
+
+ list_for_each_entry_safe(block, tmp, &allocated_blocks, link) {
+ if (count % 4 == 0 || count % 4 == 3)
+ list_move_tail(&block->link, &clear_list);
+ else
+ list_move_tail(&block->link, &dirty_list);
+ count++;
+ }
+
+ /* Free with different flags to ensure no coalescing */
+ drm_buddy_free_list(&mm, &clear_list, DRM_BUDDY_CLEARED);
+ drm_buddy_free_list(&mm, &dirty_list, 0);
+
+ for (i = 0; i < num_blocks; i++)
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K,
+ &test_blocks, 0),
+ "buddy_alloc hit an error size=%u\n", SZ_64K);
+ drm_buddy_free_list(&mm, &test_blocks, 0);
+
+ end = ktime_get();
+ elapsed_ms = ktime_to_ms(ktime_sub(end, start));
+
+ kunit_info(test, "Fragmented allocation took %lu ms\n", elapsed_ms);
+
+ drm_buddy_fini(&mm);
+
+ /*
+ * Reverse free order under fragmentation
+ *
+ * Construct a fragmented 4 GiB space by allocating every 8 KiB block with
+ * 64 KiB alignment, creating a dense scatter of small regions. Half of the
+ * blocks are selectively freed to form sparse gaps, while the remaining
+ * allocations are preserved, reordered in reverse, and released back with
+ * the cleared flag. This models a pathological reverse-ordered free pattern
+ * and measures how quickly the allocator can merge and reclaim space when
+ * deallocation occurs in the opposite order of allocation, exposing the
+ * cost difference between a linear freelist scan and an ordered tree lookup.
+ */
+ ret = drm_buddy_init(&mm, mm_size, SZ_4K);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ start = ktime_get();
+ /* Allocate maximum fragmentation */
+ for (i = 0; i < num_blocks; i++)
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K,
+ &allocated_blocks, 0),
+ "buddy_alloc hit an error size=%u\n", SZ_8K);
+
+ list_for_each_entry_safe(block, tmp, &allocated_blocks, link) {
+ if (count % 2 == 0)
+ list_move_tail(&block->link, &free_list);
+ count++;
+ }
+ drm_buddy_free_list(&mm, &free_list, DRM_BUDDY_CLEARED);
+
+ list_for_each_entry_safe_reverse(block, tmp, &allocated_blocks, link)
+ list_move(&block->link, &reverse_list);
+ drm_buddy_free_list(&mm, &reverse_list, DRM_BUDDY_CLEARED);
+
+ end = ktime_get();
+ elapsed_ms = ktime_to_ms(ktime_sub(end, start));
+
+ kunit_info(test, "Reverse-ordered free took %lu ms\n", elapsed_ms);
+
+ drm_buddy_fini(&mm);
+}
+
static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
@@ -772,6 +876,7 @@ static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_contiguous),
KUNIT_CASE(drm_test_buddy_alloc_clear),
KUNIT_CASE(drm_test_buddy_alloc_range_bias),
+ KUNIT_CASE(drm_test_buddy_fragmentation_performance),
{}
};
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index da89fd01c337..8fcc6a2f9477 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -94,8 +94,6 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_display_mode *mode;
enum drm_mode_status ok;
- dev_dbg(ddev->dev, "%s\n", __func__);
-
if (!crtc_state->enable)
return 0;
@@ -103,7 +101,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
ok = dispc_vp_mode_valid(dispc, hw_videoport, mode);
if (ok != MODE_OK) {
- dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n",
+ drm_dbg(ddev, "%s: bad mode: %ux%u pclk %u kHz\n",
__func__, mode->hdisplay, mode->vdisplay, mode->clock);
return -EINVAL;
}
@@ -172,7 +170,7 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
- dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n",
+ drm_dbg(ddev, "%s: %s is %sactive, %s modeset, event %p\n",
__func__, crtc->name, crtc->state->active ? "" : "not ",
drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need",
crtc->state->event);
@@ -328,8 +326,6 @@ static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
struct drm_device *ddev = crtc->dev;
struct tidss_device *tidss = to_tidss(ddev);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
tidss_runtime_get(tidss);
tidss_irq_enable_vblank(crtc);
@@ -342,29 +338,34 @@ static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
struct drm_device *ddev = crtc->dev;
struct tidss_device *tidss = to_tidss(ddev);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
tidss_irq_disable_vblank(crtc);
tidss_runtime_put(tidss);
}
+static void tidss_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(&tstate->base);
+ kfree(tstate);
+}
+
static void tidss_crtc_reset(struct drm_crtc *crtc)
{
- struct tidss_crtc_state *tcrtc;
+ struct tidss_crtc_state *tstate;
if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ tidss_crtc_destroy_state(crtc, crtc->state);
- kfree(crtc->state);
-
- tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
- if (!tcrtc) {
+ tstate = kzalloc(sizeof(*tstate), GFP_KERNEL);
+ if (!tstate) {
crtc->state = NULL;
return;
}
- __drm_atomic_helper_crtc_reset(crtc, &tcrtc->base);
+ __drm_atomic_helper_crtc_reset(crtc, &tstate->base);
}
static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
@@ -404,7 +405,7 @@ static const struct drm_crtc_funcs tidss_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = tidss_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_destroy_state = tidss_crtc_destroy_state,
.enable_vblank = tidss_crtc_enable_vblank,
.disable_vblank = tidss_crtc_disable_vblank,
};
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 7c8c15a5c39b..d0b191c470ca 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -1051,20 +1051,22 @@ struct dispc_bus_format *dispc_vp_find_bus_fmt(struct dispc_device *dispc,
int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
const struct drm_crtc_state *state)
{
+ struct tidss_device *tidss = dispc->tidss;
+ struct drm_device *dev = &tidss->ddev;
const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
const struct dispc_bus_format *fmt;
fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
tstate->bus_flags);
if (!fmt) {
- dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n",
+ drm_dbg(dev, "%s: Unsupported bus format: %u\n",
__func__, tstate->bus_format);
return -EINVAL;
}
if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI_AM65X &&
fmt->is_oldi_fmt) {
- dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
+ drm_dbg(dev, "%s: %s is not OLDI-port\n",
__func__, dispc->feat->vp_name[hw_videoport]);
return -EINVAL;
}
@@ -2849,8 +2851,6 @@ int dispc_runtime_resume(struct dispc_device *dispc)
void dispc_remove(struct tidss_device *tidss)
{
- dev_dbg(tidss->dev, "%s\n", __func__);
-
tidss->dispc = NULL;
}
@@ -2992,8 +2992,6 @@ int dispc_init(struct tidss_device *tidss)
unsigned int i, num_fourccs;
int r = 0;
- dev_dbg(dev, "%s\n", __func__);
-
feat = tidss->feat;
if (feat->subrev != DISPC_K2G) {
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 27d9a8fd541f..1c8cc18bc53c 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -33,8 +33,6 @@ int tidss_runtime_get(struct tidss_device *tidss)
{
int r;
- dev_dbg(tidss->dev, "%s\n", __func__);
-
r = pm_runtime_resume_and_get(tidss->dev);
WARN_ON(r < 0);
return r;
@@ -44,8 +42,6 @@ void tidss_runtime_put(struct tidss_device *tidss)
{
int r;
- dev_dbg(tidss->dev, "%s\n", __func__);
-
pm_runtime_mark_last_busy(tidss->dev);
r = pm_runtime_put_autosuspend(tidss->dev);
@@ -56,8 +52,6 @@ static int __maybe_unused tidss_pm_runtime_suspend(struct device *dev)
{
struct tidss_device *tidss = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
return dispc_runtime_suspend(tidss->dispc);
}
@@ -66,8 +60,6 @@ static int __maybe_unused tidss_pm_runtime_resume(struct device *dev)
struct tidss_device *tidss = dev_get_drvdata(dev);
int r;
- dev_dbg(dev, "%s\n", __func__);
-
r = dispc_runtime_resume(tidss->dispc);
if (r)
return r;
@@ -79,8 +71,6 @@ static int __maybe_unused tidss_suspend(struct device *dev)
{
struct tidss_device *tidss = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
return drm_mode_config_helper_suspend(&tidss->ddev);
}
@@ -88,8 +78,6 @@ static int __maybe_unused tidss_resume(struct device *dev)
{
struct tidss_device *tidss = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
return drm_mode_config_helper_resume(&tidss->ddev);
}
@@ -127,8 +115,6 @@ static int tidss_probe(struct platform_device *pdev)
int ret;
int irq;
- dev_dbg(dev, "%s\n", __func__);
-
tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver,
struct tidss_device, ddev);
if (IS_ERR(tidss))
@@ -228,8 +214,6 @@ static void tidss_remove(struct platform_device *pdev)
struct tidss_device *tidss = platform_get_drvdata(pdev);
struct drm_device *ddev = &tidss->ddev;
- dev_dbg(dev, "%s\n", __func__);
-
drm_dev_unregister(ddev);
drm_atomic_helper_shutdown(ddev);
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index c34eb90cddbe..86eb5d97410b 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -24,8 +24,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
struct drm_device *ddev = old_state->dev;
struct tidss_device *tidss = to_tidss(ddev);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
tidss_runtime_get(tidss);
drm_atomic_helper_commit_modeset_disables(ddev, old_state);
@@ -245,8 +243,6 @@ int tidss_modeset_init(struct tidss_device *tidss)
struct drm_device *ddev = &tidss->ddev;
int ret;
- dev_dbg(tidss->dev, "%s\n", __func__);
-
ret = drmm_mode_config_init(ddev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 142ae81951a0..bd10bc1b9961 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -42,8 +42,6 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
u32 hw_videoport;
int ret;
- dev_dbg(ddev->dev, "%s\n", __func__);
-
if (!new_plane_state->crtc) {
/*
* The visible field is not reset by the DRM core but only
@@ -124,8 +122,6 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
plane);
u32 hw_videoport;
- dev_dbg(ddev->dev, "%s\n", __func__);
-
if (!new_state->visible) {
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
return;
@@ -143,8 +139,6 @@ static void tidss_plane_atomic_enable(struct drm_plane *plane,
struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
}
@@ -155,8 +149,6 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane,
struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index b5f60b2b2d0e..5718d9d83a49 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -676,14 +676,7 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
if (!crtc_state->active)
return 0;
- if (state->planes[0].ptr != crtc->primary ||
- state->planes[0].state == NULL ||
- state->planes[0].state->crtc != crtc) {
- dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
- return -EINVAL;
- }
-
- return 0;
+ return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
}
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index cf77a8ce7398..aa72ca679598 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -42,8 +42,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index d2d5e9f1269f..71e874c19610 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -22,6 +22,8 @@
#include <drm/drm_panic.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include <video/vga.h>
@@ -526,6 +528,7 @@ static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct bochs_device *bochs = to_bochs_device(crtc->dev);
bochs_hw_blank(bochs, false);
+ drm_crtc_vblank_on(crtc);
}
static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
@@ -533,12 +536,14 @@ static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
{
struct bochs_device *bochs = to_bochs_device(crtc->dev);
+ drm_crtc_vblank_off(crtc);
bochs_hw_blank(bochs, true);
}
static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = {
.mode_set_nofb = bochs_crtc_helper_mode_set_nofb,
.atomic_check = bochs_crtc_helper_atomic_check,
+ .atomic_flush = drm_crtc_vblank_atomic_flush,
.atomic_enable = bochs_crtc_helper_atomic_enable,
.atomic_disable = bochs_crtc_helper_atomic_disable,
};
@@ -550,6 +555,7 @@ static const struct drm_crtc_funcs bochs_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static const struct drm_encoder_funcs bochs_encoder_funcs = {
@@ -670,6 +676,10 @@ static int bochs_kms_init(struct bochs_device *bochs)
drm_connector_attach_edid_property(connector);
drm_connector_attach_encoder(connector, encoder);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(dev);
return 0;
diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 97a93adc5669..f728fa48ac88 100644
--- a/drivers/gpu/drm/tiny/cirrus-qemu.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -45,6 +45,8 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#define DRIVER_NAME "cirrus-qemu"
#define DRIVER_DESC "qemu cirrus vga"
@@ -404,11 +406,15 @@ static void cirrus_crtc_helper_atomic_enable(struct drm_crtc *crtc,
#endif
drm_dev_exit(idx);
+
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs cirrus_crtc_helper_funcs = {
.atomic_check = cirrus_crtc_helper_atomic_check,
+ .atomic_flush = drm_crtc_vblank_atomic_flush,
.atomic_enable = cirrus_crtc_helper_atomic_enable,
+ .atomic_disable = drm_crtc_vblank_atomic_disable,
};
static const struct drm_crtc_funcs cirrus_crtc_funcs = {
@@ -418,6 +424,7 @@ static const struct drm_crtc_funcs cirrus_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static const struct drm_encoder_funcs cirrus_encoder_funcs = {
@@ -493,6 +500,10 @@ static int cirrus_pipe_init(struct cirrus_device *cirrus)
if (ret)
return ret;
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index 6c77550c51af..5426b435f702 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -379,7 +379,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
dma_resv_fini(resv);
}
-static void ttm_bo_put_basic(struct kunit *test)
+static void ttm_bo_fini_basic(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_buffer_object *bo;
@@ -410,7 +410,7 @@ static void ttm_bo_put_basic(struct kunit *test)
dma_resv_unlock(bo->base.resv);
KUNIT_EXPECT_EQ(test, err, 0);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static const char *mock_name(struct dma_fence *f)
@@ -423,7 +423,7 @@ static const struct dma_fence_ops mock_fence_ops = {
.get_timeline_name = mock_name,
};
-static void ttm_bo_put_shared_resv(struct kunit *test)
+static void ttm_bo_fini_shared_resv(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_buffer_object *bo;
@@ -463,7 +463,7 @@ static void ttm_bo_put_shared_resv(struct kunit *test)
bo->type = ttm_bo_type_device;
bo->base.resv = external_resv;
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_pin_basic(struct kunit *test)
@@ -616,8 +616,8 @@ static struct kunit_case ttm_bo_test_cases[] = {
KUNIT_CASE(ttm_bo_unreserve_basic),
KUNIT_CASE(ttm_bo_unreserve_pinned),
KUNIT_CASE(ttm_bo_unreserve_bulk),
- KUNIT_CASE(ttm_bo_put_basic),
- KUNIT_CASE(ttm_bo_put_shared_resv),
+ KUNIT_CASE(ttm_bo_fini_basic),
+ KUNIT_CASE(ttm_bo_fini_shared_resv),
KUNIT_CASE(ttm_bo_pin_basic),
KUNIT_CASE(ttm_bo_pin_unpin_resource),
KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
index 1bcc67977f48..3a1eef83190c 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -144,7 +144,7 @@ static void ttm_bo_init_reserved_sys_man(struct kunit *test)
drm_mm_node_allocated(&bo->base.vma_node.vm_node));
ttm_resource_free(bo, &bo->resource);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_init_reserved_mock_man(struct kunit *test)
@@ -186,7 +186,7 @@ static void ttm_bo_init_reserved_mock_man(struct kunit *test)
drm_mm_node_allocated(&bo->base.vma_node.vm_node));
ttm_resource_free(bo, &bo->resource);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}
@@ -221,7 +221,7 @@ static void ttm_bo_init_reserved_resv(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv);
ttm_resource_free(bo, &bo->resource);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_validate_basic(struct kunit *test)
@@ -265,7 +265,7 @@ static void ttm_bo_validate_basic(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo->resource->placement,
DRM_BUDDY_TOPDOWN_ALLOCATION);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
@@ -292,7 +292,7 @@ static void ttm_bo_validate_invalid_placement(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, -ENOMEM);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_validate_failed_alloc(struct kunit *test)
@@ -321,7 +321,7 @@ static void ttm_bo_validate_failed_alloc(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, -ENOMEM);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_bad_manager_fini(priv->ttm_dev, mem_type);
}
@@ -353,7 +353,7 @@ static void ttm_bo_validate_pinned(struct kunit *test)
ttm_bo_unpin(bo);
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = {
@@ -403,7 +403,7 @@ static void ttm_bo_validate_same_placement(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
if (params->mem_type != TTM_PL_SYSTEM)
ttm_mock_manager_fini(priv->ttm_dev, params->mem_type);
@@ -452,7 +452,7 @@ static void ttm_bo_validate_busy_placement(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_bad_manager_fini(priv->ttm_dev, fst_mem);
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
@@ -495,7 +495,7 @@ static void ttm_bo_validate_multihop(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2);
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
ttm_mock_manager_fini(priv->ttm_dev, tmp_mem);
@@ -567,7 +567,7 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC);
}
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static int threaded_dma_resv_signal(void *arg)
@@ -635,7 +635,7 @@ static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
/* Make sure we have an idle object at this point */
dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
@@ -668,7 +668,7 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
dma_fence_put(man->move);
}
@@ -753,7 +753,7 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
else
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
@@ -807,8 +807,8 @@ static void ttm_bo_validate_happy_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type);
for (i = 0; i < bo_no; i++)
- ttm_bo_put(&bos[i]);
- ttm_bo_put(bo_val);
+ ttm_bo_fini(&bos[i]);
+ ttm_bo_fini(bo_val);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
@@ -852,12 +852,12 @@ static void ttm_bo_validate_all_pinned_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, -ENOMEM);
- ttm_bo_put(bo_small);
+ ttm_bo_fini(bo_small);
ttm_bo_reserve(bo_big, false, false, NULL);
ttm_bo_unpin(bo_big);
dma_resv_unlock(bo_big->base.resv);
- ttm_bo_put(bo_big);
+ ttm_bo_fini(bo_big);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
@@ -916,13 +916,13 @@ static void ttm_bo_validate_allowed_only_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict);
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE);
- ttm_bo_put(bo);
- ttm_bo_put(bo_evictable);
+ ttm_bo_fini(bo);
+ ttm_bo_fini(bo_evictable);
ttm_bo_reserve(bo_pinned, false, false, NULL);
ttm_bo_unpin(bo_pinned);
dma_resv_unlock(bo_pinned->base.resv);
- ttm_bo_put(bo_pinned);
+ ttm_bo_fini(bo_pinned);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
@@ -973,8 +973,8 @@ static void ttm_bo_validate_deleted_evict(struct kunit *test)
KUNIT_EXPECT_NULL(test, bo_big->ttm);
KUNIT_EXPECT_NULL(test, bo_big->resource);
- ttm_bo_put(bo_small);
- ttm_bo_put(bo_big);
+ ttm_bo_fini(bo_small);
+ ttm_bo_fini(bo_big);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}
@@ -1025,8 +1025,8 @@ static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type);
KUNIT_EXPECT_NULL(test, bo_val->resource);
- ttm_bo_put(bo_init);
- ttm_bo_put(bo_val);
+ ttm_bo_fini(bo_init);
+ ttm_bo_fini(bo_val);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict);
@@ -1070,8 +1070,8 @@ static void ttm_bo_validate_evict_gutting(struct kunit *test)
KUNIT_ASSERT_NULL(test, bo_evict->resource);
KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
- ttm_bo_put(bo_evict);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo_evict);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}
@@ -1128,9 +1128,9 @@ static void ttm_bo_validate_recrusive_evict(struct kunit *test)
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict);
- ttm_bo_put(bo_val);
- ttm_bo_put(bo_tt);
- ttm_bo_put(bo_mock);
+ ttm_bo_fini(bo_val);
+ ttm_bo_fini(bo_tt);
+ ttm_bo_fini(bo_mock);
}
static struct kunit_case ttm_bo_validate_test_cases[] = {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 29423ceeec5c..fba2a68a556e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -318,18 +318,17 @@ static void ttm_bo_release(struct kref *kref)
bo->destroy(bo);
}
-/**
- * ttm_bo_put
- *
- * @bo: The buffer object.
- *
- * Unreference a buffer object.
- */
+/* TODO: remove! */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
kref_put(&bo->kref, ttm_bo_release);
}
-EXPORT_SYMBOL(ttm_bo_put);
+
+void ttm_bo_fini(struct ttm_buffer_object *bo)
+{
+ ttm_bo_put(bo);
+}
+EXPORT_SYMBOL(ttm_bo_fini);
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h
index 9d8b747a34db..e0d48eac74b0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_internal.h
+++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h
@@ -55,4 +55,6 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
return bo;
}
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
#endif
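The ttm_bo.c and ttm_bo_internal.h hunks above unexport ttm_bo_put() and make ttm_bo_fini() the external way for a driver to drop its final reference, matching the KUnit test conversions earlier in the series. A hedged sketch of the driver-side use (foo_* names are placeholders, not part of the patch):

static void foo_gem_free_object(struct drm_gem_object *obj)
{
	struct foo_bo *fbo = to_foo_bo(obj);

	/* previously ttm_bo_put(&fbo->ttm_bo) */
	ttm_bo_fini(&fbo->ttm_bo);
}
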
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index e2c82ad07eb4..d93d1bef6768 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -587,6 +587,9 @@ uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
uint64_t usage;
+ if (WARN_ON_ONCE(!man->bdev))
+ return 0;
+
spin_lock(&man->bdev->lru_lock);
usage = man->usage;
spin_unlock(&man->bdev->lru_lock);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 9ff3bade9795..aa0dded595b6 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -262,8 +262,8 @@ static int vbox_primary_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state = NULL;
if (new_state->crtc) {
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
}
@@ -344,8 +344,8 @@ static int vbox_cursor_atomic_check(struct drm_plane *plane,
int ret;
if (new_state->crtc) {
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 123ab0ce1781..bb8c40be3250 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -35,6 +35,7 @@ config DRM_VC4_HDMI_CEC
bool "Broadcom VC4 HDMI CEC Support"
depends on DRM_VC4
select CEC_CORE
+ select DRM_DISPLAY_HDMI_CEC_HELPER
help
Choose this option if you have a Broadcom VC4 GPU
and want to use CEC.
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 07c91b450f93..049c92dd5d27 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -32,6 +32,7 @@
*/
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/display/drm_scdc_helper.h>
@@ -375,14 +376,6 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
drm_atomic_helper_connector_hdmi_hotplug(connector, status);
- if (status == connector_status_disconnected) {
- cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
- return;
- }
-
- cec_s_phys_addr(vc4_hdmi->cec_adap,
- connector->display_info.source_physical_address, false);
-
if (status != connector_status_connected)
return;
@@ -2384,8 +2377,8 @@ static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
struct vc4_hdmi *vc4_hdmi = priv;
if (vc4_hdmi->cec_rx_msg.len)
- cec_received_msg(vc4_hdmi->cec_adap,
- &vc4_hdmi->cec_rx_msg);
+ drm_connector_hdmi_cec_received_msg(&vc4_hdmi->connector,
+ &vc4_hdmi->cec_rx_msg);
return IRQ_HANDLED;
}
@@ -2395,15 +2388,17 @@ static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv)
struct vc4_hdmi *vc4_hdmi = priv;
if (vc4_hdmi->cec_tx_ok) {
- cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK,
- 0, 0, 0, 0);
+ drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector,
+ CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
} else {
/*
* This CEC implementation makes 1 retry, so if we
* get a NACK, then that means it made 2 attempts.
*/
- cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK,
- 0, 2, 0, 0);
+ drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector,
+ CEC_TX_STATUS_NACK,
+ 0, 2, 0, 0);
}
return IRQ_HANDLED;
}
@@ -2560,9 +2555,9 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
return ret;
}
-static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
+static int vc4_hdmi_cec_enable(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
@@ -2627,9 +2622,9 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
return 0;
}
-static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
+static int vc4_hdmi_cec_disable(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
@@ -2663,17 +2658,17 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
return 0;
}
-static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+static int vc4_hdmi_cec_adap_enable(struct drm_connector *connector, bool enable)
{
if (enable)
- return vc4_hdmi_cec_enable(adap);
+ return vc4_hdmi_cec_enable(connector);
else
- return vc4_hdmi_cec_disable(adap);
+ return vc4_hdmi_cec_disable(connector);
}
-static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+static int vc4_hdmi_cec_adap_log_addr(struct drm_connector *connector, u8 log_addr)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
@@ -2699,10 +2694,10 @@ static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
return 0;
}
-static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+static int vc4_hdmi_cec_adap_transmit(struct drm_connector *connector, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *dev = vc4_hdmi->connector.dev;
unsigned long flags;
u32 val;
@@ -2745,84 +2740,65 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
return 0;
}
-static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
- .adap_enable = vc4_hdmi_cec_adap_enable,
- .adap_log_addr = vc4_hdmi_cec_adap_log_addr,
- .adap_transmit = vc4_hdmi_cec_adap_transmit,
-};
-
-static void vc4_hdmi_cec_release(void *ptr)
-{
- struct vc4_hdmi *vc4_hdmi = ptr;
-
- cec_unregister_adapter(vc4_hdmi->cec_adap);
- vc4_hdmi->cec_adap = NULL;
-}
-
-static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+static int vc4_hdmi_cec_init(struct drm_connector *connector)
{
- struct cec_connector_info conn_info;
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
int ret;
- if (!of_property_present(dev->of_node, "interrupts")) {
- dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
- return 0;
- }
-
- vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
- vc4_hdmi,
- vc4_hdmi->variant->card_name,
- CEC_CAP_DEFAULTS |
- CEC_CAP_CONNECTOR_INFO, 1);
- ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
- if (ret < 0)
- return ret;
-
- cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
- cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
-
if (vc4_hdmi->variant->external_irq_controller) {
ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"),
vc4_cec_irq_handler_rx_bare,
vc4_cec_irq_handler_rx_thread, 0,
"vc4 hdmi cec rx", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ return ret;
ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"),
vc4_cec_irq_handler_tx_bare,
vc4_cec_irq_handler_tx_thread, 0,
"vc4 hdmi cec tx", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ return ret;
} else {
ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
vc4_cec_irq_handler,
vc4_cec_irq_handler_thread, 0,
"vc4 hdmi cec", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ return ret;
}
- ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
- if (ret < 0)
- goto err_delete_cec_adap;
+ return 0;
+}
+
+static const struct drm_connector_hdmi_cec_funcs vc4_hdmi_cec_funcs = {
+ .init = vc4_hdmi_cec_init,
+ .enable = vc4_hdmi_cec_adap_enable,
+ .log_addr = vc4_hdmi_cec_adap_log_addr,
+ .transmit = vc4_hdmi_cec_adap_transmit,
+};
+
+static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi)
+{
+ struct platform_device *pdev = vc4_hdmi->pdev;
+ struct device *dev = &pdev->dev;
+
+ if (!of_property_present(dev->of_node, "interrupts")) {
+ dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
+ return 0;
+ }
/*
- * NOTE: Strictly speaking, we should probably use a DRM-managed
- * registration there to avoid removing the CEC adapter by the
- * time the DRM driver doesn't have any user anymore.
+ * NOTE: the CEC adapter will be unregistered by drmm cleanup from
+ * drm_managed_release(), which is called from drm_dev_release()
+ * during device unbind.
*
* However, the CEC framework already cleans up the CEC adapter
* only when the last user has closed its file descriptor, so we
* don't need to handle it in DRM.
*
- * By the time the device-managed hook is executed, we will give
- * up our reference to the CEC adapter and therefore don't
- * really care when it's actually freed.
- *
* There's still a problematic sequence: if we unregister our
* CEC adapter, but the userspace keeps a handle on the CEC
* adapter but not the DRM device for some reason. In such a
@@ -2833,19 +2809,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
* the CEC framework already handles this too, by calling
* cec_is_registered() in cec_ioctl() and cec_poll().
*/
- ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi);
- if (ret)
- return ret;
-
- return 0;
-
-err_delete_cec_adap:
- cec_delete_adapter(vc4_hdmi->cec_adap);
-
- return ret;
+ return drmm_connector_hdmi_cec_register(&vc4_hdmi->connector,
+ &vc4_hdmi_cec_funcs,
+ vc4_hdmi->variant->card_name,
+ 1,
+ &pdev->dev);
}
#else
-static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi)
{
return 0;
}
@@ -3250,7 +3221,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_put_runtime_pm;
- ret = vc4_hdmi_cec_init(vc4_hdmi);
+ ret = vc4_hdmi_cec_register(vc4_hdmi);
if (ret)
goto err_put_runtime_pm;
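
The conversion above replaces vc4's open-coded cec_allocate_adapter()/cec_register_adapter() sequence with the DRM-managed HDMI CEC helper. A minimal sketch of the resulting driver-side wiring, using hypothetical foo_* names and showing only the hooks this patch itself relies on (.init, used above for IRQ setup, is omitted here)::

	static int foo_cec_enable(struct drm_connector *connector, bool enable)
	{
		/* Power the CEC block up or down. */
		return 0;
	}

	static int foo_cec_log_addr(struct drm_connector *connector, u8 log_addr)
	{
		/* Program the logical address into the hardware. */
		return 0;
	}

	static int foo_cec_transmit(struct drm_connector *connector, u8 attempts,
				    u32 signal_free_time, struct cec_msg *msg)
	{
		/*
		 * Start the transmission; completion is reported later from
		 * the IRQ handler via drm_connector_hdmi_cec_transmit_done(),
		 * and received frames via drm_connector_hdmi_cec_received_msg().
		 */
		return 0;
	}

	static const struct drm_connector_hdmi_cec_funcs foo_cec_funcs = {
		.enable   = foo_cec_enable,
		.log_addr = foo_cec_log_addr,
		.transmit = foo_cec_transmit,
	};

	/* One logical address; the adapter is torn down by drmm cleanup. */
	ret = drmm_connector_hdmi_cec_register(&foo->connector, &foo_cec_funcs,
					       "foo-hdmi", 1, dev);
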
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index a31157c99bee..8d069718df00 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -147,7 +147,6 @@ struct vc4_hdmi {
*/
bool disable_wifi_frequencies;
- struct cec_adapter *cec_adap;
struct cec_msg cec_rx_msg;
bool cec_tx_ok;
bool cec_irq_was_rx;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 056d344c5411..b4a53f68865b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -497,8 +497,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
u32 v_subsample = fb->format->vsub;
int ret;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
if (!crtc_state) {
DRM_DEBUG_KMS("Invalid crtc state\n");
return -EINVAL;
@@ -875,8 +874,7 @@ static void vc4_plane_calc_load(struct drm_plane_state *state)
unsigned int vscale_factor;
vc4_state = to_vc4_plane_state(state);
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);
/* The HVS is able to process 2 pixels/cycle when scaling the source,
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index fd76730fd38c..07db319c3d7f 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -79,7 +79,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
dma_fence_context_alloc(1), 1);
- timer_setup(&fence->timer, vgem_fence_timeout, 0);
+ timer_setup(&fence->timer, vgem_fence_timeout, TIMER_IRQSAFE);
/* We force the fence to expire within 10s to prevent driver hangs */
mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index c3315935d8bc..e972d9b015a9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -32,6 +32,8 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "virtgpu_drv.h"
@@ -55,6 +57,7 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
@@ -99,6 +102,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ drm_crtc_vblank_on(crtc);
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -108,6 +112,8 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+ drm_crtc_vblank_off(crtc);
+
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
virtio_gpu_notify(vgdev);
}
@@ -121,9 +127,10 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
- crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+ struct drm_pending_vblank_event *event;
/*
* virtio-gpu can't do modeset and plane update operations
@@ -133,6 +140,20 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
*/
if (drm_atomic_crtc_needs_modeset(crtc_state))
output->needs_modeset = true;
+
+ spin_lock_irq(&dev->event_lock);
+
+ event = crtc_state->event;
+ crtc_state->event = NULL;
+
+ if (event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+
+ spin_unlock_irq(&dev->event_lock);
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -257,6 +278,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
struct drm_encoder *encoder = &output->enc;
struct drm_crtc *crtc = &output->crtc;
struct drm_plane *primary, *cursor;
+ int ret;
output->index = index;
if (index == 0) {
@@ -271,8 +293,10 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor))
return PTR_ERR(cursor);
- drm_crtc_init_with_planes(dev, crtc, primary, cursor,
- &virtio_gpu_crtc_funcs, NULL);
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+ &virtio_gpu_crtc_funcs, NULL);
+ if (ret)
+ return ret;
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
@@ -356,6 +380,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
vgdev_output_init(vgdev, i);
+ ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(vgdev->ddev);
return 0;
}
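
virtio-gpu now initializes vblank support and completes page-flip events from atomic_flush using the usual arm-or-send pattern. Annotated, the sequence added above does the following (a generic sketch; dev, crtc and crtc_state as in any atomic_flush hook)::

	spin_lock_irq(&dev->event_lock);

	/* Take ownership of the pending event, if any. */
	event = crtc_state->event;
	crtc_state->event = NULL;

	if (event) {
		/* Deliver on the next vblank if a reference can be held ... */
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, event);
		else
			/* ... otherwise (e.g. vblank off) complete it now. */
			drm_crtc_send_vblank_event(crtc, event);
	}

	spin_unlock_irq(&dev->event_lock);
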
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index e60573e0f3e9..bac0790c6577 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -7,25 +7,18 @@
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "vkms_drv.h"
-static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+static bool vkms_crtc_handle_vblank_timeout(struct drm_crtc *crtc)
{
- struct vkms_output *output = container_of(timer, struct vkms_output,
- vblank_hrtimer);
- struct drm_crtc *crtc = &output->crtc;
+ struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
struct vkms_crtc_state *state;
- u64 ret_overrun;
bool ret, fence_cookie;
fence_cookie = dma_fence_begin_signalling();
- ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
- output->period_ns);
- if (ret_overrun != 1)
- pr_warn("%s: vblank timer overrun\n", __func__);
-
spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
@@ -57,55 +50,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
dma_fence_end_signalling(fence_cookie);
- return HRTIMER_RESTART;
-}
-
-static int vkms_enable_vblank(struct drm_crtc *crtc)
-{
- struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
- struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
-
- hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- out->period_ns = ktime_set(0, vblank->framedur_ns);
- hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
-
- return 0;
-}
-
-static void vkms_disable_vblank(struct drm_crtc *crtc)
-{
- struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
-
- hrtimer_cancel(&out->vblank_hrtimer);
-}
-
-static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq)
-{
- struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
- struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
-
- if (!READ_ONCE(vblank->enabled)) {
- *vblank_time = ktime_get();
- return true;
- }
-
- *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
-
- if (WARN_ON(*vblank_time == vblank->time))
- return true;
-
- /*
- * To prevent races we roll the hrtimer forward before we do any
- * interrupt processing - this is how real hw works (the interrupt is
- * only generated after all the vblank registers are updated) and what
- * the vblank core expects. Therefore we need to always correct the
- * timestampe by one frame.
- */
- *vblank_time -= output->period_ns;
-
return true;
}
@@ -159,9 +103,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
.reset = vkms_atomic_crtc_reset,
.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
.atomic_destroy_state = vkms_atomic_crtc_destroy_state,
- .enable_vblank = vkms_enable_vblank,
- .disable_vblank = vkms_disable_vblank,
- .get_vblank_timestamp = vkms_get_vblank_timestamp,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
.get_crc_sources = vkms_get_crc_sources,
.set_crc_source = vkms_set_crc_source,
.verify_crc_source = vkms_verify_crc_source,
@@ -185,7 +127,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
+ plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane);
WARN_ON(!plane_state);
if (!plane_state->visible)
@@ -201,7 +143,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
i = 0;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
+ plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane);
if (!plane_state->visible)
continue;
@@ -213,18 +155,6 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- drm_crtc_vblank_on(crtc);
-}
-
-static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- drm_crtc_vblank_off(crtc);
-}
-
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
__acquires(&vkms_output->lock)
@@ -265,8 +195,9 @@ static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
.atomic_check = vkms_crtc_atomic_check,
.atomic_begin = vkms_crtc_atomic_begin,
.atomic_flush = vkms_crtc_atomic_flush,
- .atomic_enable = vkms_crtc_atomic_enable,
- .atomic_disable = vkms_crtc_atomic_disable,
+ .atomic_enable = drm_crtc_vblank_atomic_enable,
+ .atomic_disable = drm_crtc_vblank_atomic_disable,
+ .handle_vblank_timeout = vkms_crtc_handle_vblank_timeout,
};
struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *primary,
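
With the hrtimer moved into the new vblank timer helper, a driver that simulates vblanks only needs to plug in the helper's CRTC funcs and supply a per-frame callback. A minimal sketch under that assumption, with hypothetical foo_* names (the callback's bool return is inferred from the vkms usage above)::

	static bool foo_handle_vblank_timeout(struct drm_crtc *crtc)
	{
		/* Runs once per simulated frame from the helper's timer. */
		return drm_crtc_handle_vblank(crtc);
	}

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		.reset			= drm_atomic_helper_crtc_reset,
		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
		/* enable_vblank/disable_vblank/get_vblank_timestamp */
		DRM_CRTC_VBLANK_TIMER_FUNCS,
	};

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		.atomic_enable		= drm_crtc_vblank_atomic_enable,
		.atomic_disable		= drm_crtc_vblank_atomic_disable,
		.handle_vblank_timeout	= foo_handle_vblank_timeout,
	};
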
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 8013c31efe3b..fb9711e1c6fb 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -215,8 +215,6 @@ struct vkms_output {
struct drm_crtc crtc;
struct drm_writeback_connector wb_connector;
struct drm_encoder wb_encoder;
- struct hrtimer vblank_hrtimer;
- ktime_t period_ns;
struct workqueue_struct *composer_workq;
spinlock_t lock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index eedf1fe60be7..39f8c46550c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -37,7 +37,7 @@ static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
if (bo)
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static int vmw_gem_object_open(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7e281c3c6bc5..c4ac9b47e23a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -15,6 +15,7 @@
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"
+#include <drm/drm_dumb_buffers.h>
#include <drm/ttm/ttm_placement.h>
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
@@ -2267,23 +2268,9 @@ int vmw_dumb_create(struct drm_file *file_priv,
* contents is going to be rendered guest side.
*/
if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
- int cpp = DIV_ROUND_UP(args->bpp, 8);
-
- switch (cpp) {
- case 1: /* DRM_FORMAT_C8 */
- case 2: /* DRM_FORMAT_RGB565 */
- case 4: /* DRM_FORMAT_XRGB8888 */
- break;
- default:
- /*
- * Dumb buffers don't allow anything else.
- * This is tested via IGT's dumb_buffers
- */
- return -EINVAL;
- }
-
- args->pitch = args->width * cpp;
- args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
+ ret = drm_mode_size_dumb(dev, args, 0, 0);
+ if (ret)
+ return ret;
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
args->size, &args->handle,
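
Both this vmwgfx hunk and the xe change further down replace hand-rolled pitch/size math with drm_mode_size_dumb(). Judging from the two call sites, the trailing arguments are a pitch alignment and a size alignment (0 meaning no extra constraint beyond the helper's defaults). A hedged sketch of a dumb_create hook built on that assumption, with hypothetical foo_* names::

	static int foo_dumb_create(struct drm_file *file_priv,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
	{
		int ret;

		/* Validate bpp/width/height and fill args->pitch/args->size. */
		ret = drm_mode_size_dumb(dev, args, SZ_64, PAGE_SIZE);
		if (ret)
			return ret;

		return foo_gem_create_with_handle(file_priv, dev, args->size,
						  &args->handle);
	}
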
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 87902b4bd6d3..01227c77f6d7 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -40,23 +40,23 @@ config DRM_XE_DEBUG_VM
If in doubt, say "N".
-config DRM_XE_DEBUG_MEMIRQ
- bool "Enable extra memirq debugging"
+config DRM_XE_DEBUG_SRIOV
+ bool "Enable extra SR-IOV debugging"
default n
+ imply DRM_XE_DEBUG_MEMIRQ
help
- Choose this option to enable additional debugging info for
- memory based interrupts.
+ Enable extra SR-IOV debugging info.
Recommended for driver developers only.
If in doubt, say "N".
-config DRM_XE_DEBUG_SRIOV
- bool "Enable extra SR-IOV debugging"
+config DRM_XE_DEBUG_MEMIRQ
+ bool "Enable extra memirq debugging"
default n
- select DRM_XE_DEBUG_MEMIRQ
help
- Enable extra SR-IOV debugging info.
+ Choose this option to enable additional debugging info for
+ memory based interrupts.
Recommended for driver developers only.
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index d9c6cf0f189e..3fbec058facc 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -174,7 +174,11 @@ xe-$(CONFIG_PCI_IOV) += \
xe_lmtt_ml.o \
xe_pci_sriov.o \
xe_sriov_pf.o \
- xe_sriov_pf_service.o
+ xe_sriov_pf_control.o \
+ xe_sriov_pf_debugfs.o \
+ xe_sriov_pf_provision.o \
+ xe_sriov_pf_service.o \
+ xe_tile_sriov_pf_debugfs.o
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 31090c69dfbe..47756e4674a1 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -196,14 +196,6 @@ enum xe_guc_register_context_multi_lrc_param_offsets {
XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11,
};
-enum xe_guc_context_wq_item_offsets {
- XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN = 0,
- XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW,
- XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS,
- XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID,
- XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL,
-};
-
enum xe_guc_report_status {
XE_GUC_REPORT_STATUS_UNKNOWN = 0x0,
XE_GUC_REPORT_STATUS_ACKED = 0x1,
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 8ea9a472113c..af8139d00161 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -13,7 +13,7 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_wa.h"
-#include <generated/xe_wa_oob.h>
+#include <generated/xe_device_wa_oob.h>
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
@@ -41,7 +41,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe) && !XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
+ if (!IS_DGFX(xe) && !XE_DEVICE_WA(xe, 22019338487_display)) {
obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 19e691fccf8c..083c6904f8f1 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -223,15 +223,14 @@ void xe_display_irq_reset(struct xe_device *xe)
gen11_display_irq_reset(display);
}
-void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
+void xe_display_irq_postinstall(struct xe_device *xe)
{
struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
- if (gt->info.id == XE_GT0)
- gen11_de_irq_postinstall(display);
+ gen11_de_irq_postinstall(display);
}
static bool suspend_to_idle(void)
@@ -324,7 +323,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
* properly.
*/
intel_power_domains_disable(display);
- drm_client_dev_suspend(&xe->drm, false);
+ drm_client_dev_suspend(&xe->drm);
if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&xe->drm);
@@ -356,7 +355,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
return;
intel_power_domains_disable(display);
- drm_client_dev_suspend(&xe->drm, false);
+ drm_client_dev_suspend(&xe->drm);
if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&xe->drm);
@@ -481,7 +480,7 @@ void xe_display_pm_resume(struct xe_device *xe)
intel_opregion_resume(display);
- drm_client_dev_resume(&xe->drm, false);
+ drm_client_dev_resume(&xe->drm);
intel_power_domains_enable(display);
}
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index e533aa4750bc..76db95c25f7e 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -26,7 +26,7 @@ void xe_display_unregister(struct xe_device *xe);
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl);
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
void xe_display_irq_reset(struct xe_device *xe);
-void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
+void xe_display_irq_postinstall(struct xe_device *xe);
void xe_display_pm_suspend(struct xe_device *xe);
void xe_display_pm_shutdown(struct xe_device *xe);
@@ -55,7 +55,7 @@ static inline void xe_display_unregister(struct xe_device *xe) {}
static inline void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) {}
static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) {}
static inline void xe_display_irq_reset(struct xe_device *xe) {}
-static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
+static inline void xe_display_irq_postinstall(struct xe_device *xe) {}
static inline void xe_display_pm_suspend(struct xe_device *xe) {}
static inline void xe_display_pm_shutdown(struct xe_device *xe) {}
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 8ada1cbcb16c..2aa1b8c03411 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -13,6 +13,7 @@
bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
struct xe_device *xe = to_xe_device(display->drm);
+ struct xe_gt *wa_gt = xe_root_mmio_gt(xe);
- return XE_GT_WA(xe_root_mmio_gt(xe), 16023588340);
+ return wa_gt && XE_GT_WA(wa_gt, 16023588340);
}
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 94f00def811b..12d25c5290fd 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -25,7 +25,7 @@
#include "xe_vram_types.h"
#include "xe_wa.h"
-#include <generated/xe_wa_oob.h>
+#include <generated/xe_device_wa_oob.h>
void intel_plane_initial_vblank_wait(struct intel_crtc *crtc)
{
@@ -123,7 +123,7 @@ initial_plane_bo(struct xe_device *xe,
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
- if (XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display))
+ if (XE_DEVICE_WA(xe, 22019338487_display))
return NULL;
/*
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index f4c3e1187a00..68172b0248a6 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -141,6 +141,8 @@
#define INHIBIT_SWITCH_UNTIL_PREEMPTED REG_BIT(31)
#define IDLE_DELAY REG_GENMASK(20, 0)
+#define RING_CURRENT_LRCA(base) XE_REG((base) + 0x240)
+
#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
#define CTX_CTRL_PXP_ENABLE REG_BIT(10)
#define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
@@ -153,6 +155,8 @@
#define GFX_DISABLE_LEGACY_MODE REG_BIT(3)
#define GFX_MSIX_INTERRUPT_ENABLE REG_BIT(13)
+#define RING_CSMQDEBUG(base) XE_REG((base) + 0x2b0)
+
#define RING_TIMESTAMP(base) XE_REG((base) + 0x358)
#define RING_TIMESTAMP_UDW(base) XE_REG((base) + 0x358 + 4)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 51f2a03847f9..228de47c0f3f 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -239,6 +239,9 @@
#define XE2_GT_GEOMETRY_DSS_1 XE_REG(0x9150)
#define XE2_GT_GEOMETRY_DSS_2 XE_REG(0x9154)
+#define SERVICE_COPY_ENABLE XE_REG(0x9170)
+#define FUSE_SERVICE_COPY_ENABLE_MASK REG_GENMASK(7, 0)
+
#define GDRST XE_REG(0x941c)
#define GRDOM_GUC REG_BIT(3)
#define GRDOM_FULL REG_BIT(0)
@@ -346,10 +349,6 @@
#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n))
#define VDN_MFXVDENC_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n))
-#define CTC_MODE XE_REG(0xa26c)
-#define CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
-#define CTC_SOURCE_DIVIDE_LOGIC REG_BIT(0)
-
#define FORCEWAKE_RENDER XE_REG(0xa278)
#define POWERGATE_DOMAIN_STATUS XE_REG(0xa2a0)
diff --git a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
index af781c8e4a80..f2e455e2bfe4 100644
--- a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
@@ -14,6 +14,9 @@
#define REG_SG_REMAP_ADDR_PREFIX XE_REG(SOC_BASE + 0x0164)
#define REG_SG_REMAP_ADDR_POSTFIX XE_REG(SOC_BASE + 0x0168)
+#define I2C_BRIDGE_PCICFGCTL XE_REG(I2C_BRIDGE_OFFSET + 0x200)
+#define ACPI_INTR_EN REG_BIT(1)
+
#define I2C_CONFIG_CMD XE_REG(I2C_CONFIG_SPACE_OFFSET + PCI_COMMAND)
#define I2C_CONFIG_PMCSR XE_REG(I2C_CONFIG_SPACE_OFFSET + 0x84)
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
index 7c2a3a140142..2f97662d958d 100644
--- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -65,7 +65,10 @@
#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF)
#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF)
#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF)
+#define VCS4_VCS5_INTR_MASK XE_REG(0x1900b0, XE_REG_OPTION_VF)
+#define VCS6_VCS7_INTR_MASK XE_REG(0x1900b4, XE_REG_OPTION_VF)
#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF)
+#define VECS2_VECS3_INTR_MASK XE_REG(0x1900d4, XE_REG_OPTION_VF)
#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
@@ -80,9 +83,10 @@
#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
#define GSC_ER_COMPLETE REG_BIT(5)
-#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
+#define GT_FLUSH_COMPLETE_INTERRUPT REG_BIT(4)
#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
-#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
+#define GT_COMPUTE_WALKER_INTERRUPT REG_BIT(2)
+#define GT_MI_USER_INTERRUPT REG_BIT(0)
/* irqs for OTHER_KCR_INSTANCE */
#define KCR_PXP_STATE_TERMINATED_INTERRUPT REG_BIT(1)
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index a7e548a2bdfb..5df98de5ba3c 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -31,6 +31,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
struct drm_exec *exec)
{
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
+ struct dma_buf_attachment *attach;
u32 mem_type;
int ret;
@@ -46,7 +47,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
mem_type = XE_PL_TT;
else if (params->force_different_devices && !is_dynamic(params) &&
(params->mem_mask & XE_BO_FLAG_SYSTEM))
- /* Pin migrated to TT */
+ /* Pin migrated to TT on non-dynamic attachments. */
mem_type = XE_PL_TT;
if (!xe_bo_is_mem_type(exported, mem_type)) {
@@ -88,6 +89,18 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+ /* Check that we can pin without migrating. */
+ attach = list_first_entry_or_null(&dmabuf->attachments, typeof(*attach), node);
+ if (attach) {
+ int err = dma_buf_pin(attach);
+
+ if (!err) {
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+ dma_buf_unpin(attach);
+ }
+ KUNIT_EXPECT_EQ(test, err, 0);
+ }
+
if (params->force_different_devices)
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
else
@@ -150,7 +163,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
xe_bo_lock(import_bo, false);
err = xe_bo_validate(import_bo, NULL, false, exec);
- /* Pinning in VRAM is not allowed. */
+ /* Pinning in VRAM is not allowed for non-dynamic attachments */
if (!is_dynamic(params) &&
params->force_different_devices &&
!(params->mem_mask & XE_BO_FLAG_SYSTEM))
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 663a79ec960d..f3179b31f13e 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -311,8 +311,8 @@ const void *xe_pci_id_gen_param(struct kunit *test, const void *prev, char *desc
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_id_gen_param);
-static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
- u32 *ver, u32 *revid)
+static int fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
+ u32 *ver, u32 *revid)
{
struct kunit *test = kunit_get_current_test();
struct xe_pci_fake_data *data = test->priv;
@@ -324,6 +324,8 @@ static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
*ver = data->graphics_verx100;
*revid = xe_step_to_gmdid(data->step.graphics);
}
+
+ return 0;
}
static void fake_xe_info_probe_tile_count(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index b0254b014fe4..d2255a59e58f 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -48,12 +48,14 @@ struct rtp_test_case {
const struct xe_rtp_entry *entries;
};
-static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
+static bool match_yes(const struct xe_device *xe, const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
{
return true;
}
-static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
+static bool match_no(const struct xe_device *xe, const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
{
return false;
}
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 4410e28dee54..7b6502081873 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -9,6 +9,7 @@
#include <linux/nospec.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_backup.h>
@@ -34,6 +35,7 @@
#include "xe_res_cursor.h"
#include "xe_shrinker.h"
#include "xe_sriov_vf_ccs.h"
+#include "xe_tile.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -81,6 +83,10 @@ static struct ttm_placement tt_placement = {
.placement = tt_placement_flags,
};
+#define for_each_set_bo_vram_flag(bit__, bo_flags__) \
+ for (unsigned int __bit_tmp = BIT(0); __bit_tmp <= XE_BO_FLAG_VRAM_MASK; __bit_tmp <<= 1) \
+ for_each_if(((bit__) = __bit_tmp) & (bo_flags__) & XE_BO_FLAG_VRAM_MASK)
+
bool mem_type_is_vram(u32 mem_type)
{
return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
@@ -213,6 +219,27 @@ static bool force_contiguous(u32 bo_flags)
bo_flags & XE_BO_FLAG_PINNED;
}
+static u8 vram_bo_flag_to_tile_id(struct xe_device *xe, u32 vram_bo_flag)
+{
+ xe_assert(xe, vram_bo_flag & XE_BO_FLAG_VRAM_MASK);
+ xe_assert(xe, (vram_bo_flag & (vram_bo_flag - 1)) == 0);
+
+ return __ffs(vram_bo_flag >> (__ffs(XE_BO_FLAG_VRAM0) - 1)) - 1;
+}
+
+static u32 bo_vram_flags_to_vram_placement(struct xe_device *xe, u32 bo_flags, u32 vram_flag,
+ enum ttm_bo_type type)
+{
+ u8 tile_id = vram_bo_flag_to_tile_id(xe, vram_flag);
+
+ xe_assert(xe, tile_id < xe->info.tile_count);
+
+ if (type == ttm_bo_type_kernel && !(bo_flags & XE_BO_FLAG_FORCE_USER_VRAM))
+ return xe->tiles[tile_id].mem.kernel_vram->placement;
+ else
+ return xe->tiles[tile_id].mem.vram->placement;
+}
+
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
@@ -245,12 +272,15 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
}
static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
- u32 bo_flags, u32 *c)
+ u32 bo_flags, enum ttm_bo_type type, u32 *c)
{
- if (bo_flags & XE_BO_FLAG_VRAM0)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
- if (bo_flags & XE_BO_FLAG_VRAM1)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
+ u32 vram_flag;
+
+ for_each_set_bo_vram_flag(vram_flag, bo_flags) {
+ u32 pl = bo_vram_flags_to_vram_placement(xe, bo_flags, vram_flag, type);
+
+ add_vram(xe, bo, bo->placements, bo_flags, pl, c);
+ }
}
static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
@@ -269,11 +299,11 @@ static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
}
static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
- u32 bo_flags)
+ u32 bo_flags, enum ttm_bo_type type)
{
u32 c = 0;
- try_add_vram(xe, bo, bo_flags, &c);
+ try_add_vram(xe, bo, bo_flags, type, &c);
try_add_system(xe, bo, bo_flags, &c);
try_add_stolen(xe, bo, bo_flags, &c);
@@ -289,10 +319,10 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
}
int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
- u32 bo_flags)
+ u32 bo_flags, enum ttm_bo_type type)
{
xe_bo_assert_held(bo);
- return __xe_bo_placement_for_flags(xe, bo, bo_flags);
+ return __xe_bo_placement_for_flags(xe, bo, bo_flags, type);
}
static void xe_evict_flags(struct ttm_buffer_object *tbo,
@@ -1708,7 +1738,7 @@ static void xe_gem_object_free(struct drm_gem_object *obj)
* refcount directly if needed.
*/
__xe_bo_vunmap(gem_to_xe_bo(obj));
- ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
+ ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base));
}
static void xe_gem_object_close(struct drm_gem_object *obj,
@@ -2164,7 +2194,7 @@ struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
xe_validation_assert_exec(xe, exec, &bo->ttm.base);
if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
- err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
+ err = __xe_bo_placement_for_flags(xe, bo, bo->flags, type);
if (WARN_ON(err)) {
xe_ttm_bo_destroy(&bo->ttm);
return ERR_PTR(err);
@@ -2222,34 +2252,31 @@ struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
}
static int __xe_bo_fixed_placement(struct xe_device *xe,
- struct xe_bo *bo,
+ struct xe_bo *bo, enum ttm_bo_type type,
u32 flags,
u64 start, u64 end, u64 size)
{
struct ttm_place *place = bo->placements;
+ u32 vram_flag, vram_stolen_flags;
if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
return -EINVAL;
+ vram_flag = flags & XE_BO_FLAG_VRAM_MASK;
+ vram_stolen_flags = (flags & (XE_BO_FLAG_STOLEN)) | vram_flag;
+
+ /* check if more than one VRAM/STOLEN flag is set */
+ if (hweight32(vram_stolen_flags) > 1)
+ return -EINVAL;
+
place->flags = TTM_PL_FLAG_CONTIGUOUS;
place->fpfn = start >> PAGE_SHIFT;
place->lpfn = end >> PAGE_SHIFT;
- switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
- case XE_BO_FLAG_VRAM0:
- place->mem_type = XE_PL_VRAM0;
- break;
- case XE_BO_FLAG_VRAM1:
- place->mem_type = XE_PL_VRAM1;
- break;
- case XE_BO_FLAG_STOLEN:
+ if (flags & XE_BO_FLAG_STOLEN)
place->mem_type = XE_PL_STOLEN;
- break;
-
- default:
- /* 0 or multiple of the above set */
- return -EINVAL;
- }
+ else
+ place->mem_type = bo_vram_flags_to_vram_placement(xe, flags, vram_flag, type);
bo->placement = (struct ttm_placement) {
.num_placement = 1,
@@ -2278,7 +2305,7 @@ __xe_bo_create_locked(struct xe_device *xe,
return bo;
flags |= XE_BO_FLAG_FIXED_PLACEMENT;
- err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
+ err = __xe_bo_fixed_placement(xe, bo, type, flags, start, end, size);
if (err) {
xe_bo_free(bo);
return ERR_PTR(err);
@@ -3577,14 +3604,13 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
struct xe_device *xe = to_xe_device(dev);
struct xe_bo *bo;
uint32_t handle;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
int err;
u32 page_size = max_t(u32, PAGE_SIZE,
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
- args->pitch = ALIGN(args->width * cpp, 64);
- args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
- page_size);
+ err = drm_mode_size_dumb(dev, args, SZ_64, page_size);
+ if (err)
+ return err;
bo = xe_bo_create_user(xe, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
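
A quick worked example of the vram_bo_flag_to_tile_id() arithmetic above, assuming (hypothetically) XE_BO_FLAG_VRAM0 == BIT(2) and XE_BO_FLAG_VRAM1 == BIT(3): __ffs(XE_BO_FLAG_VRAM0) - 1 == 1, so XE_BO_FLAG_VRAM1 >> 1 == BIT(2) and __ffs(BIT(2)) - 1 == 1, i.e. tile 1; the same computation maps XE_BO_FLAG_VRAM0 to tile 0. for_each_set_bo_vram_flag() then walks every VRAM bit set in bo_flags and, per bit, bo_vram_flags_to_vram_placement() picks that tile's kernel VRAM region for kernel BOs (unless XE_BO_FLAG_FORCE_USER_VRAM is set) and the general-purpose VRAM region otherwise.
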
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index a77af42b5f9e..353d607d301d 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -49,6 +49,7 @@
#define XE_BO_FLAG_GGTT2 BIT(22)
#define XE_BO_FLAG_GGTT3 BIT(23)
#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)
+#define XE_BO_FLAG_FORCE_USER_VRAM BIT(25)
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
@@ -122,7 +123,7 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);
int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
- u32 bo_flags);
+ u32 bo_flags, enum ttm_bo_type type);
static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo)
{
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index bc5b4c5fab81..7661fca7f278 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -73,6 +73,11 @@ int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe)
&xe->pinned.late.kernel_bo_present,
xe_bo_notifier_prepare_pinned);
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_notifier_prepare_pinned);
+
return ret;
}
@@ -93,6 +98,10 @@ void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe)
(void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
&xe->pinned.late.kernel_bo_present,
xe_bo_notifier_unprepare_pinned);
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_notifier_unprepare_pinned);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index 139663423185..c1419a270fa4 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -15,9 +15,11 @@
#include "instructions/xe_mi_commands.h"
#include "xe_configfs.h"
+#include "xe_gt_types.h"
#include "xe_hw_engine_types.h"
#include "xe_module.h"
#include "xe_pci_types.h"
+#include "xe_sriov_types.h"
/**
* DOC: Xe Configfs
@@ -56,6 +58,7 @@
* :
* └── 0000:03:00.0
* ├── survivability_mode
+ * ├── gt_types_allowed
* ├── engines_allowed
* └── enable_psmi
*
@@ -79,6 +82,44 @@
*
* This attribute can only be set before binding to the device.
*
+ * Allowed GT types:
+ * -----------------
+ *
+ * Allow only specific types of GTs to be detected and initialized by the
+ * driver. Any combination of GT types can be enabled/disabled, although
+ * some settings will cause the device to fail to probe.
+ *
+ * Writes support both comma- and newline-separated input formats. Reads
+ * will always return one GT type per line. "primary" and "media" are the
+ * GT type names supported by this interface.
+ *
+ * This attribute can only be set before binding to the device.
+ *
+ * Examples:
+ *
+ * Allow both primary and media GTs to be initialized and used. This matches
+ * the driver's default behavior::
+ *
+ * # echo 'primary,media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
+ *
+ * Allow only the primary GT of each tile to be initialized and used,
+ * effectively disabling the media GT if it exists on the platform::
+ *
+ * # echo 'primary' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
+ *
+ * Allow only the media GT of each tile to be initialized and used,
+ * effectively disabling the primary GT. **This configuration will cause
+ * device probe failure on all current platforms, but may be allowed on
+ * igpu platforms in the future**::
+ *
+ * # echo 'media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
+ *
+ * Disable all GTs. Only other GPU IP (such as display) is potentially usable.
+ * **This configuration will cause device probe failure on all current
+ * platforms, but may be allowed on igpu platforms in the future**::
+ *
+ * # echo '' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
+ *
* Allowed engines:
* ----------------
*
@@ -169,6 +210,32 @@
* Currently this is implemented only for post and mid context restore and
* these attributes can only be set before binding to the device.
*
+ * Max SR-IOV Virtual Functions
+ * ----------------------------
+ *
+ * This config allows limiting the number of Virtual Functions (VFs) that can
+ * be managed by the Physical Function (PF) driver; a value of 0 disables
+ * PF mode (no VFs).
+ *
+ * The default max_vfs config value is taken from the max_vfs modparam.
+ *
+ * How to enable PF with support for an unlimited (up to the HW limit) number of VFs::
+ *
+ * # echo unlimited > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
+ * # echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
+ *
+ * How to enable PF with support for up to 3 VFs::
+ *
+ * # echo 3 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
+ * # echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
+ *
+ * How to disable PF mode and always run as native::
+ *
+ * # echo 0 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
+ * # echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
+ *
+ * This setting only takes effect when probing the device.
+ *
* Remove devices
* ==============
*
@@ -185,30 +252,44 @@ struct wa_bb {
struct xe_config_group_device {
struct config_group group;
+ struct config_group sriov;
struct xe_config_device {
+ u64 gt_types_allowed;
u64 engines_allowed;
struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
bool survivability_mode;
bool enable_psmi;
+ struct {
+ unsigned int max_vfs;
+ } sriov;
} config;
/* protects attributes */
struct mutex lock;
/* matching descriptor */
const struct xe_device_desc *desc;
+ /* tentative SR-IOV mode */
+ enum xe_sriov_mode mode;
};
static const struct xe_config_device device_defaults = {
+ .gt_types_allowed = U64_MAX,
.engines_allowed = U64_MAX,
.survivability_mode = false,
.enable_psmi = false,
+ .sriov = {
+ .max_vfs = UINT_MAX,
+ },
};
static void set_device_defaults(struct xe_config_device *config)
{
*config = device_defaults;
+#ifdef CONFIG_PCI_IOV
+ config->sriov.max_vfs = xe_modparam.max_vfs;
+#endif
}
struct engine_info {
@@ -220,6 +301,7 @@ struct engine_info {
/* Some helpful macros to aid on the sizing of buffer allocation when parsing */
#define MAX_ENGINE_CLASS_CHARS 5
#define MAX_ENGINE_INSTANCE_CHARS 2
+#define MAX_GT_TYPE_CHARS 7
static const struct engine_info engine_info[] = {
{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
@@ -230,6 +312,14 @@ static const struct engine_info engine_info[] = {
{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
};
+static const struct {
+ const char name[MAX_GT_TYPE_CHARS + 1];
+ enum xe_gt_type type;
+} gt_types[] = {
+ { .name = "primary", .type = XE_GT_TYPE_MAIN },
+ { .name = "media", .type = XE_GT_TYPE_MEDIA },
+};
+
static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
{
return container_of(to_config_group(item), struct xe_config_group_device, group);
@@ -292,6 +382,57 @@ static ssize_t survivability_mode_store(struct config_item *item, const char *pa
return len;
}
+static ssize_t gt_types_allowed_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ char *p = page;
+
+ for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++)
+ if (dev->gt_types_allowed & BIT_ULL(gt_types[i].type))
+ p += sprintf(p, "%s\n", gt_types[i].name);
+
+ return p - page;
+}
+
+static ssize_t gt_types_allowed_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+ char *buf __free(kfree) = kstrdup(page, GFP_KERNEL);
+ char *p = buf;
+ u64 typemask = 0;
+
+ if (!buf)
+ return -ENOMEM;
+
+ while (p) {
+ char *typename = strsep(&p, ",\n");
+ bool matched = false;
+
+ if (typename[0] == '\0')
+ continue;
+
+ for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++) {
+ if (strcmp(typename, gt_types[i].name) == 0) {
+ typemask |= BIT(gt_types[i].type);
+ matched = true;
+ break;
+ }
+ }
+
+ if (!matched)
+ return -EINVAL;
+ }
+
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.gt_types_allowed = typemask;
+
+ return len;
+}
+
static ssize_t engines_allowed_show(struct config_item *item, char *page)
{
struct xe_config_device *dev = to_xe_config_device(item);
@@ -672,6 +813,7 @@ CONFIGFS_ATTR(, ctx_restore_mid_bb);
CONFIGFS_ATTR(, ctx_restore_post_bb);
CONFIGFS_ATTR(, enable_psmi);
CONFIGFS_ATTR(, engines_allowed);
+CONFIGFS_ATTR(, gt_types_allowed);
CONFIGFS_ATTR(, survivability_mode);
static struct configfs_attribute *xe_config_device_attrs[] = {
@@ -679,6 +821,7 @@ static struct configfs_attribute *xe_config_device_attrs[] = {
&attr_ctx_restore_post_bb,
&attr_enable_psmi,
&attr_engines_allowed,
+ &attr_gt_types_allowed,
&attr_survivability_mode,
NULL,
};
@@ -721,6 +864,68 @@ static const struct config_item_type xe_config_device_type = {
.ct_owner = THIS_MODULE,
};
+static ssize_t sriov_max_vfs_show(struct config_item *item, char *page)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
+
+ guard(mutex)(&dev->lock);
+
+ if (dev->config.sriov.max_vfs == UINT_MAX)
+ return sprintf(page, "%s\n", "unlimited");
+ else
+ return sprintf(page, "%u\n", dev->config.sriov.max_vfs);
+}
+
+static ssize_t sriov_max_vfs_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
+ unsigned int max_vfs;
+ int ret;
+
+ guard(mutex)(&dev->lock);
+
+ if (is_bound(dev))
+ return -EBUSY;
+
+ ret = kstrtouint(page, 0, &max_vfs);
+ if (ret) {
+ if (!sysfs_streq(page, "unlimited"))
+ return ret;
+ max_vfs = UINT_MAX;
+ }
+
+ dev->config.sriov.max_vfs = max_vfs;
+ return len;
+}
+
+CONFIGFS_ATTR(sriov_, max_vfs);
+
+static struct configfs_attribute *xe_config_sriov_attrs[] = {
+ &sriov_attr_max_vfs,
+ NULL,
+};
+
+static bool xe_config_sriov_is_visible(struct config_item *item,
+ struct configfs_attribute *attr, int n)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
+
+ if (attr == &sriov_attr_max_vfs && dev->mode != XE_SRIOV_MODE_PF)
+ return false;
+
+ return true;
+}
+
+static struct configfs_group_operations xe_config_sriov_group_ops = {
+ .is_visible = xe_config_sriov_is_visible,
+};
+
+static const struct config_item_type xe_config_sriov_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_group_ops = &xe_config_sriov_group_ops,
+ .ct_attrs = xe_config_sriov_attrs,
+};
+
static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
{
struct device_driver *driver = driver_find("xe", &pci_bus_type);
@@ -746,6 +951,7 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro
unsigned int domain, bus, slot, function;
struct xe_config_group_device *dev;
const struct xe_device_desc *match;
+ enum xe_sriov_mode mode;
struct pci_dev *pdev;
char canonical[16];
int vfnumber = 0;
@@ -762,6 +968,9 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro
return ERR_PTR(-EINVAL);
pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ mode = pdev ? dev_is_pf(&pdev->dev) ?
+ XE_SRIOV_MODE_PF : XE_SRIOV_MODE_NONE : XE_SRIOV_MODE_VF;
+
if (!pdev && function)
pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
if (!pdev && slot)
@@ -796,9 +1005,15 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro
return ERR_PTR(-ENOMEM);
dev->desc = match;
+ dev->mode = match->has_sriov ? mode : XE_SRIOV_MODE_NONE;
+
set_device_defaults(&dev->config);
config_group_init_type_name(&dev->group, name, &xe_config_device_type);
+ if (dev->mode != XE_SRIOV_MODE_NONE) {
+ config_group_init_type_name(&dev->sriov, "sriov", &xe_config_sriov_type);
+ configfs_add_default_group(&dev->sriov, &dev->group);
+ }
mutex_init(&dev->lock);
@@ -846,6 +1061,7 @@ static void dump_custom_dev_config(struct pci_dev *pdev,
dev->config.attr_); \
} while (0)
+ PRI_CUSTOM_ATTR("%llx", gt_types_allowed);
PRI_CUSTOM_ATTR("%llx", engines_allowed);
PRI_CUSTOM_ATTR("%d", enable_psmi);
PRI_CUSTOM_ATTR("%d", survivability_mode);
@@ -896,6 +1112,44 @@ bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
return mode;
}
+static u64 get_gt_types_allowed(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u64 mask;
+
+ if (!dev)
+ return device_defaults.gt_types_allowed;
+
+ mask = dev->config.gt_types_allowed;
+ config_group_put(&dev->group);
+
+ return mask;
+}
+
+/**
+ * xe_configfs_primary_gt_allowed - determine whether primary GTs are supported
+ * @pdev: pci device
+ *
+ * Return: True if primary GTs are enabled, false if they have been disabled via
+ * configfs.
+ */
+bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev)
+{
+ return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MAIN);
+}
+
+/**
+ * xe_configfs_media_gt_allowed - determine whether media GTs are supported
+ * @pdev: pci device
+ *
+ * Return: True if the media GTs are enabled, false if they have been disabled
+ * via configfs.
+ */
+bool xe_configfs_media_gt_allowed(struct pci_dev *pdev)
+{
+ return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MEDIA);
+}
+
/**
* xe_configfs_get_engines_allowed - get engine allowed mask from configfs
* @pdev: pci device
@@ -988,6 +1242,34 @@ u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
return len;
}
+#ifdef CONFIG_PCI_IOV
+/**
+ * xe_configfs_get_max_vfs() - Get number of VFs that could be managed
+ * @pdev: the &pci_dev device
+ *
+ * Find the configfs group that belongs to the PCI device and return the
+ * maximum number of Virtual Functions (VFs) that could be managed by this
+ * device. If the configfs group is not present, the value of the max_vfs
+ * module parameter is used.
+ *
+ * Return: maximum number of VFs that could be managed.
+ */
+unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ unsigned int max_vfs;
+
+ if (!dev)
+ return xe_modparam.max_vfs;
+
+ scoped_guard(mutex, &dev->lock)
+ max_vfs = dev->config.sriov.max_vfs;
+
+ config_group_put(&dev->group);
+
+ return max_vfs;
+}
+#endif
+
int __init xe_configfs_init(void)
{
int ret;
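
The new xe_configfs_get_max_vfs() hook gives the PF probe path a per-device VF limit. A hypothetical sketch of how a caller might combine it with the PCI SR-IOV capability (the actual consumer is not part of this hunk)::

	#ifdef CONFIG_PCI_IOV
	static unsigned int foo_pf_vf_limit(struct xe_device *xe)
	{
		struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
		unsigned int limit = xe_configfs_get_max_vfs(pdev);

		/* Never advertise more VFs than the SR-IOV capability allows. */
		return min_t(unsigned int, limit,
			     pci_sriov_get_totalvfs(pdev));
	}
	#endif
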
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
index c61e0e47ed94..fed57be0b90e 100644
--- a/drivers/gpu/drm/xe/xe_configfs.h
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -17,23 +17,31 @@ int xe_configfs_init(void);
void xe_configfs_exit(void);
void xe_configfs_check_device(struct pci_dev *pdev);
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
+bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev);
+bool xe_configfs_media_gt_allowed(struct pci_dev *pdev);
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev);
u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
const u32 **cs);
u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
const u32 **cs);
+#ifdef CONFIG_PCI_IOV
+unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev);
+#endif
#else
static inline int xe_configfs_init(void) { return 0; }
static inline void xe_configfs_exit(void) { }
static inline void xe_configfs_check_device(struct pci_dev *pdev) { }
static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
+static inline bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev) { return true; }
+static inline bool xe_configfs_media_gt_allowed(struct pci_dev *pdev) { return true; }
static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; }
static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
const u32 **cs) { return 0; }
static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
const u32 **cs) { return 0; }
+static inline unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev) { return UINT_MAX; }
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index cd977dbd1ef6..1c3c9557a9bd 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -23,12 +23,12 @@
#include "xe_psmi.h"
#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
-#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_debugfs.h"
#include "xe_sriov_vf.h"
#include "xe_step.h"
#include "xe_tile_debugfs.h"
-#include "xe_wa.h"
#include "xe_vsec.h"
+#include "xe_wa.h"
#ifdef CONFIG_DRM_XE_DEBUG
#include "xe_bo_evict.h"
@@ -349,17 +349,14 @@ static ssize_t disable_late_binding_set(struct file *f, const char __user *ubuf,
{
struct xe_device *xe = file_inode(f)->i_private;
struct xe_late_bind *late_bind = &xe->late_bind;
- u32 uval;
- ssize_t ret;
+ bool val;
+ int ret;
- ret = kstrtouint_from_user(ubuf, size, sizeof(uval), &uval);
+ ret = kstrtobool_from_user(ubuf, size, &val);
if (ret)
return ret;
- if (uval > 1)
- return -EINVAL;
-
- late_bind->disable = !!uval;
+ late_bind->disable = val;
return size;
}
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 34d33965eac2..5f6a412b571c 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -8,6 +8,7 @@
#include <linux/aperture.h>
#include <linux/delay.h>
#include <linux/fault-inject.h>
+#include <linux/iopoll.h>
#include <linux/units.h>
#include <drm/drm_atomic_helper.h>
@@ -630,16 +631,22 @@ mask_err:
return err;
}
-static bool verify_lmem_ready(struct xe_device *xe)
+static int lmem_initializing(struct xe_device *xe)
{
- u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT;
+ if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT)
+ return 0;
+
+ if (signal_pending(current))
+ return -EINTR;
- return !!val;
+ return 1;
}
static int wait_for_lmem_ready(struct xe_device *xe)
{
- unsigned long timeout, start;
+ const unsigned long TIMEOUT_SEC = 60;
+ unsigned long prev_jiffies;
+ int initializing;
if (!IS_DGFX(xe))
return 0;
@@ -647,39 +654,35 @@ static int wait_for_lmem_ready(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return 0;
- if (verify_lmem_ready(xe))
+ if (!lmem_initializing(xe))
return 0;
drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
+ prev_jiffies = jiffies;
- start = jiffies;
- timeout = start + secs_to_jiffies(60); /* 60 sec! */
-
- do {
- if (signal_pending(current))
- return -EINTR;
-
- /*
- * The boot firmware initializes local memory and
- * assesses its health. If memory training fails,
- * the punit will have been instructed to keep the GT powered
- * down.we won't be able to communicate with it
- *
- * If the status check is done before punit updates the register,
- * it can lead to the system being unusable.
- * use a timeout and defer the probe to prevent this.
- */
- if (time_after(jiffies, timeout)) {
- drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
- return -EPROBE_DEFER;
- }
-
- msleep(20);
-
- } while (!verify_lmem_ready(xe));
+ /*
+ * The boot firmware initializes local memory and
+ * assesses its health. If memory training fails,
+ * the punit will have been instructed to keep the GT powered
+	 * down. We won't be able to communicate with it.
+	 *
+	 * If the status check is done before the punit updates the register,
+	 * it can lead to the system being unusable, so use a timeout and
+	 * defer the probe to prevent this.
+ */
+ poll_timeout_us(initializing = lmem_initializing(xe),
+ initializing <= 0,
+ 20 * USEC_PER_MSEC, TIMEOUT_SEC * USEC_PER_SEC, true);
+ if (initializing < 0)
+ return initializing;
+
+ if (initializing) {
+ drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
+ return -EPROBE_DEFER;
+ }
drm_dbg(&xe->drm, "lmem ready after %ums",
- jiffies_to_msecs(jiffies - start));
+ jiffies_to_msecs(jiffies - prev_jiffies));
return 0;
}
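[Editor's note] The rewritten wait above polls GU_CNTL via poll_timeout_us(); as a hedged, equivalent sketch using the long-standing read_poll_timeout() helper from <linux/iopoll.h> (signal handling omitted, register and helper names reused from the hunk above):

    #include <linux/iopoll.h>

    static int wait_lmem_init_sketch(struct xe_device *xe)
    {
            u32 val;

            /* Poll GU_CNTL every 20 ms for up to 60 s, sleeping before the first read. */
            return read_poll_timeout(xe_mmio_read32, val, val & LMEM_INIT,
                                     20 * USEC_PER_MSEC, 60 * USEC_PER_SEC, true,
                                     xe_root_tile_mmio(xe), GU_CNTL);
    }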
@@ -779,6 +782,8 @@ static int probe_has_flat_ccs(struct xe_device *xe)
return 0;
gt = xe_root_mmio_gt(xe);
+ if (!gt)
+ return 0;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
@@ -983,12 +988,12 @@ void xe_device_remove(struct xe_device *xe)
void xe_device_shutdown(struct xe_device *xe)
{
- struct xe_gt *gt;
- u8 id;
-
drm_dbg(&xe->drm, "Shutting down device\n");
if (xe_driver_flr_disabled(xe)) {
+ struct xe_gt *gt;
+ u8 id;
+
xe_display_pm_shutdown(xe);
xe_irq_suspend(xe);
@@ -1059,6 +1064,8 @@ void xe_device_l2_flush(struct xe_device *xe)
unsigned int fw_ref;
gt = xe_root_mmio_gt(xe);
+ if (!gt)
+ return;
if (!XE_GT_WA(gt, 16023588340))
return;
@@ -1104,6 +1111,9 @@ void xe_device_td_flush(struct xe_device *xe)
return;
root_gt = xe_root_mmio_gt(xe);
+ if (!root_gt)
+ return;
+
if (XE_GT_WA(root_gt, 16023588340)) {
/* A transient flush is not sufficient: flush the L2 */
xe_device_l2_flush(xe);
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index c5151c86a98a..ec9c06b06fb5 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -38,13 +38,8 @@ vram_d3cold_threshold_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
struct xe_device *xe = pdev_to_xe_device(pdev);
- int ret;
-
- xe_pm_runtime_get(xe);
- ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
- xe_pm_runtime_put(xe);
- return ret;
+ return sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
}
static ssize_t
@@ -173,11 +168,8 @@ static umode_t late_bind_attr_is_visible(struct kobject *kobj,
u32 cap = 0;
int ret;
- xe_pm_runtime_get(xe);
-
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
&cap, NULL);
- xe_pm_runtime_put(xe);
if (ret)
return 0;
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 74d7af830b85..9e3666a226da 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -27,6 +27,7 @@
#include "xe_sriov_vf_ccs_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
+#include "xe_tile_sriov_vf_types.h"
#include "xe_validation.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
@@ -158,7 +159,15 @@ struct xe_tile {
/** @mem: memory management info for tile */
struct {
/**
- * @mem.vram: VRAM info for tile.
+ * @mem.kernel_vram: kernel-dedicated VRAM info for tile.
+ *
+ * Although VRAM is associated with a specific tile, it can
+ * still be accessed by all tiles' GTs.
+ */
+ struct xe_vram_region *kernel_vram;
+
+ /**
+ * @mem.vram: general purpose VRAM info for tile.
*
* Although VRAM is associated with a specific tile, it can
* still be accessed by all tiles' GTs.
@@ -185,6 +194,8 @@ struct xe_tile {
struct {
/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
struct xe_ggtt_node *ggtt_balloon[2];
+ /** @sriov.vf.self_config: VF configuration data */
+ struct xe_tile_sriov_vf_selfconfig self_config;
} vf;
} sriov;
@@ -318,6 +329,8 @@ struct xe_device {
u8 skip_mtcfg:1;
/** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
+ /** @info.needs_shared_vf_gt_wq: needs shared GT WQ on VF */
+ u8 needs_shared_vf_gt_wq:1;
} info;
/** @wa_active: keep track of active workarounds */
diff --git a/drivers/gpu/drm/xe/xe_device_wa_oob.rules b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
index 3a0c4ccc4224..55ba01bc8f38 100644
--- a/drivers/gpu/drm/xe/xe_device_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
@@ -1,2 +1,5 @@
+22010954014 PLATFORM(DG2)
15015404425 PLATFORM(LUNARLAKE)
PLATFORM(PANTHERLAKE)
+22019338487_display PLATFORM(LUNARLAKE)
+14022085890 SUBPLATFORM(BATTLEMAGE, G21)
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index a7d67725c3ee..54e42960daad 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -48,32 +48,43 @@ static void xe_dma_buf_detach(struct dma_buf *dmabuf,
static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct dma_buf *dmabuf = attach->dmabuf;
+ struct drm_gem_object *obj = dmabuf->priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = xe_bo_device(bo);
struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
+ bool allow_vram = true;
int ret;
- /*
- * For now only support pinning in TT memory, for two reasons:
- * 1) Avoid pinning in a placement not accessible to some importers.
- * 2) Pinning in VRAM requires PIN accounting which is a to-do.
- */
- if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ allow_vram = false;
+ } else {
+ list_for_each_entry(attach, &dmabuf->attachments, node) {
+ if (!attach->peer2peer) {
+ allow_vram = false;
+ break;
+ }
+ }
+ }
+
+ if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT) &&
+ !(xe_bo_is_vram(bo) && allow_vram)) {
drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
return -EINVAL;
}
- ret = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
- if (ret) {
- if (ret != -EINTR && ret != -ERESTARTSYS)
- drm_dbg(&xe->drm,
- "Failed migrating dma-buf to TT memory: %pe\n",
- ERR_PTR(ret));
- return ret;
+ if (!allow_vram) {
+ ret = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
+ if (ret) {
+ if (ret != -EINTR && ret != -ERESTARTSYS)
+ drm_dbg(&xe->drm,
+ "Failed migrating dma-buf to TT memory: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
}
- ret = xe_bo_pin_external(bo, true, exec);
+ ret = xe_bo_pin_external(bo, !allow_vram, exec);
xe_assert(xe, !ret);
return 0;
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index f5cfdf29fde3..650e45f6a7c7 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -124,6 +124,27 @@ struct xe_eu_stall_data_xe2 {
__u64 unused[6];
} __packed;
+/*
+ * EU stall data format for Xe3p arch GPUs.
+ */
+struct xe_eu_stall_data_xe3p {
+ __u64 ip_addr:61; /* Bits 0 to 60 */
+ __u64 tdr_count:8; /* Bits 61 to 68 */
+ __u64 other_count:8; /* Bits 69 to 76 */
+ __u64 control_count:8; /* Bits 77 to 84 */
+ __u64 pipestall_count:8; /* Bits 85 to 92 */
+ __u64 send_count:8; /* Bits 93 to 100 */
+ __u64 dist_acc_count:8; /* Bits 101 to 108 */
+ __u64 sbid_count:8; /* Bits 109 to 116 */
+ __u64 sync_count:8; /* Bits 117 to 124 */
+ __u64 inst_fetch_count:8; /* Bits 125 to 132 */
+ __u64 active_count:8; /* Bits 133 to 140 */
+ __u64 ex_id:3; /* Bits 141 to 143 */
+ __u64 end_flag:1; /* Bit 144 */
+ __u64 unused_bits:47;
+ __u64 unused[5];
+} __packed;
+
const u64 eu_stall_sampling_rates[] = {251, 251 * 2, 251 * 3, 251 * 4, 251 * 5, 251 * 6, 251 * 7};
/**
@@ -167,10 +188,13 @@ size_t xe_eu_stall_data_record_size(struct xe_device *xe)
{
size_t record_size = 0;
- if (xe->info.platform == XE_PVC)
- record_size = sizeof(struct xe_eu_stall_data_pvc);
+ if (GRAPHICS_VER(xe) >= 35)
+ record_size = sizeof(struct xe_eu_stall_data_xe3p);
else if (GRAPHICS_VER(xe) >= 20)
record_size = sizeof(struct xe_eu_stall_data_xe2);
+ else if (xe->info.platform == XE_PVC)
+ record_size = sizeof(struct xe_eu_stall_data_pvc);
+
xe_assert(xe, is_power_of_2(record_size));
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 7715e74bb945..0dc27476832b 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -16,6 +16,7 @@
#include "xe_exec_queue.h"
#include "xe_hw_engine_group.h"
#include "xe_macros.h"
+#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
@@ -123,7 +124,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct xe_validation_ctx ctx;
struct xe_sched_job *job;
struct xe_vm *vm;
- bool write_locked, skip_retry = false;
+ bool write_locked;
int err = 0;
struct xe_hw_engine_group *group;
enum xe_hw_engine_group_execution_mode mode, previous_mode;
@@ -247,7 +248,7 @@ retry:
* on task freezing during suspend / hibernate, the call will
* return -ERESTARTSYS and the IOCTL will be rerun.
*/
- err = wait_for_completion_interruptible(&xe->pm_block);
+ err = xe_pm_block_on_suspend(xe);
if (err)
goto err_unlock_list;
@@ -265,12 +266,6 @@ retry:
goto err_exec;
}
- if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
- err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
- skip_retry = true;
- goto err_exec;
- }
-
if (xe_exec_queue_uses_pxp(q)) {
err = xe_vm_validate_protected(q->vm);
if (err)
@@ -327,8 +322,6 @@ retry:
xe_sched_job_init_user_fence(job, &syncs[i]);
}
- if (xe_exec_queue_is_lr(q))
- q->ring_ops->emit_job(job);
if (!xe_vm_in_lr_mode(vm))
xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
xe_sched_job_push(job);
@@ -354,7 +347,7 @@ err_exec:
xe_validation_ctx_fini(&ctx);
err_unlock_list:
up_read(&vm->lock);
- if (err == -EAGAIN && !skip_retry)
+ if (err == -EAGAIN)
goto retry;
err_hw_exec_mode:
if (mode == EXEC_MODE_DMA_FENCE)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 37b2b93b73d6..90cbc95f8e2e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -15,6 +15,7 @@
#include "xe_dep_scheduler.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
@@ -28,6 +29,29 @@
#include "xe_vm.h"
#include "xe_pxp.h"
+/**
+ * DOC: Execution Queue
+ *
+ * An execution queue is an interface to a HW context of execution.
+ * The user creates an execution queue, submits GPU jobs through it and,
+ * in the end, destroys it.
+ *
+ * Execution queues can also be created by XeKMD itself for driver-internal
+ * operations such as object migration.
+ *
+ * An execution queue is associated with a specific HW engine or a group of
+ * engines (belonging to the same tile and engine class), and any GPU job
+ * submitted on the queue will run on one of these engines.
+ *
+ * An execution queue is tied to an address space (VM). It holds a reference
+ * to the associated VM and to the underlying Logical Ring Contexts (LRCs)
+ * until the queue is destroyed.
+ *
+ * The execution queue sits on top of the submission backend. It transparently
+ * handles whichever backend the platform uses (GuC or Execlists) and the ring
+ * operations supported by the different engine classes.
+ */
+
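[Editor's note] The DOC comment above describes the user-facing lifecycle; as a minimal userspace sketch — assuming the uAPI declared in include/uapi/drm/xe_drm.h, an already-open device fd and an existing VM — creating a single render exec queue could look like the following (the helper name and error handling are illustrative only):

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/xe_drm.h>

    static int create_render_queue(int fd, uint32_t vm_id, uint32_t *queue_id)
    {
            struct drm_xe_engine_class_instance instance = {
                    .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
                    .engine_instance = 0,
                    .gt_id = 0,
            };
            struct drm_xe_exec_queue_create create = {
                    .width = 1,                /* one LRC, no parallel submission */
                    .num_placements = 1,       /* single engine placement */
                    .vm_id = vm_id,
                    .instances = (uintptr_t)&instance,
            };

            if (drmIoctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
                    return -errno;

            *queue_id = create.exec_queue_id;  /* later used by DRM_IOCTL_XE_EXEC */
            return 0;
    }

Once no longer needed, the queue is torn down with DRM_IOCTL_XE_EXEC_QUEUE_DESTROY.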
enum xe_exec_queue_sched_prop {
XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
XE_EXEC_QUEUE_TIMESLICE = 1,
@@ -160,7 +184,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
return q;
}
-static int __xe_exec_queue_init(struct xe_exec_queue *q)
+static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
{
int i, err;
u32 flags = 0;
@@ -179,17 +203,37 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
flags |= XE_LRC_CREATE_RUNALONE;
}
+ if (!(exec_queue_flags & EXEC_QUEUE_FLAG_KERNEL))
+ flags |= XE_LRC_CREATE_USER_CTX;
+
+ err = q->ops->init(q);
+ if (err)
+ return err;
+
+ /*
+ * This must occur after q->ops->init to avoid race conditions during VF
+ * post-migration recovery, as the fixups for the LRC GGTT addresses
+ * depend on the queue being present in the backend tracking structure.
+ *
+ * In addition to the above, we must wait on in-flight GGTT changes to
+ * avoid writing out stale values here. Such a wait provides a solid
+ * solution (without a race) only if the function can detect migration
+ * instantly from the moment the vCPU resumes execution.
+ */
for (i = 0; i < q->width; ++i) {
- q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
- if (IS_ERR(q->lrc[i])) {
- err = PTR_ERR(q->lrc[i]);
+ struct xe_lrc *lrc;
+
+ xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
+ lrc = xe_lrc_create(q->hwe, q->vm, xe_lrc_ring_size(),
+ q->msix_vec, flags);
+ if (IS_ERR(lrc)) {
+ err = PTR_ERR(lrc);
goto err_lrc;
}
- }
- err = q->ops->init(q);
- if (err)
- goto err_lrc;
+ /* Pairs with READ_ONCE in xe_exec_queue_contexts_hwsp_rebase */
+ WRITE_ONCE(q->lrc[i], lrc);
+ }
return 0;
@@ -225,7 +269,7 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (IS_ERR(q))
return q;
- err = __xe_exec_queue_init(q);
+ err = __xe_exec_queue_init(q, flags);
if (err)
goto err_post_alloc;
@@ -824,25 +868,6 @@ bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
!(q->flags & EXEC_QUEUE_FLAG_VM);
}
-static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
-{
- return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
-}
-
-/**
- * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
- * @q: The exec_queue
- *
- * Return: True if the exec_queue's ring is full, false otherwise.
- */
-bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
-{
- struct xe_lrc *lrc = q->lrc[0];
- s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
-
- return xe_exec_queue_num_job_inflight(q) >= max_job;
-}
-
/**
* xe_exec_queue_is_idle() - Whether an exec_queue is idle.
* @q: The exec_queue
@@ -1114,36 +1139,19 @@ int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
int err = 0;
for (i = 0; i < q->width; ++i) {
- xe_lrc_update_memirq_regs_with_address(q->lrc[i], q->hwe, scratch);
- xe_lrc_update_hwctx_regs_with_address(q->lrc[i]);
- err = xe_lrc_setup_wa_bb_with_scratch(q->lrc[i], q->hwe, scratch);
+ struct xe_lrc *lrc;
+
+ /* Pairs with WRITE_ONCE in __xe_exec_queue_init */
+ lrc = READ_ONCE(q->lrc[i]);
+ if (!lrc)
+ continue;
+
+ xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
+ xe_lrc_update_hwctx_regs_with_address(lrc);
+ err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);
if (err)
break;
}
return err;
}
-
-/**
- * xe_exec_queue_jobs_ring_restore - Re-emit ring commands of requests pending on given queue.
- * @q: the &xe_exec_queue struct instance
- */
-void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q)
-{
- struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct xe_sched_job *job;
-
- /*
- * This routine is used within VF migration recovery. This means
- * using the lock here introduces a restriction: we cannot wait
- * for any GFX HW response while the lock is taken.
- */
- spin_lock(&sched->base.job_list_lock);
- list_for_each_entry(job, &sched->base.pending_list, drm.list) {
- if (xe_sched_job_is_error(job))
- continue;
-
- q->ring_ops->emit_job(job);
- }
- spin_unlock(&sched->base.job_list_lock);
-}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 15ec852e7f7e..a4dfbe858bda 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -64,8 +64,6 @@ static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
-bool xe_exec_queue_ring_full(struct xe_exec_queue *q);
-
bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
void xe_exec_queue_kill(struct xe_exec_queue *q);
@@ -92,7 +90,6 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
-void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q);
-
struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 27b76cf9da89..282505fa1377 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -207,6 +207,9 @@ struct xe_exec_queue_ops {
* call after suspend. In dma-fencing path thus must return within a
* reasonable amount of time. -ETIME return shall indicate an error
* waiting for suspend resulting in associated VM getting killed.
+ * An -EAGAIN return indicates the wait should be tried again; if the
+ * wait is within a work item, the work item should be requeued as a
+ * deadlock-avoidance mechanism.
*/
int (*suspend_wait)(struct xe_exec_queue *q);
/**
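[Editor's note] A hedged sketch of how a backend work item might honour the -EAGAIN contract documented above; the work struct member, worker name and workqueue are hypothetical, only q->ops->suspend_wait() comes from the patch:

    static void suspend_fini_worker(struct work_struct *w)
    {
            /* Hypothetical embedding of the work item in the exec queue. */
            struct xe_exec_queue *q =
                    container_of(w, struct xe_exec_queue, suspend_fini_work);
            int err = q->ops->suspend_wait(q);

            if (err == -EAGAIN) {
                    /* Requeue rather than block, as deadlock avoidance. */
                    queue_work(system_unbound_wq, w);
                    return;
            }

            /* ... complete the suspend, or handle -ETIME / other errors ... */
    }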
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index f83d421ac9d3..769d05517f93 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -339,7 +339,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
const struct drm_sched_init_args args = {
.ops = &drm_sched_ops,
.num_rqs = 1,
- .credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
+ .credit_limit = xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES,
.hang_limit = XE_SCHED_HANG_LIMIT,
.timeout = XE_SCHED_JOB_TIMEOUT,
.name = q->hwe->name,
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 5edc0cad47e2..20d226d90c50 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -107,10 +107,23 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev)
static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
struct xe_tile *tile = ggtt->tile;
- struct xe_gt *affected_gt = XE_GT_WA(tile->primary_gt, 22019338487) ?
- tile->primary_gt : tile->media_gt;
- struct xe_mmio *mmio = &affected_gt->mmio;
- u32 max_gtt_writes = XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
+ struct xe_gt *affected_gt;
+ u32 max_gtt_writes;
+
+ if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 22019338487)) {
+ affected_gt = tile->primary_gt;
+ max_gtt_writes = 1100;
+
+ /* Only expected to apply to primary GT on dgpu platforms */
+ xe_tile_assert(tile, IS_DGFX(tile_to_xe(tile)));
+ } else {
+ affected_gt = tile->media_gt;
+ max_gtt_writes = 63;
+
+ /* Only expected to apply to media GT on igpu platforms */
+ xe_tile_assert(tile, !IS_DGFX(tile_to_xe(tile)));
+ }
+
/*
* Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
* to wait for completion of prior GTT writes before letting this through.
@@ -119,7 +132,7 @@ static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
lockdep_assert_held(&ggtt->lock);
if ((++ggtt->access_count % max_gtt_writes) == 0) {
- xe_mmio_write32(mmio, GMD_ID, 0x0);
+ xe_mmio_write32(&affected_gt->mmio, GMD_ID, 0x0);
ggtt->access_count = 0;
}
}
@@ -159,6 +172,16 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
}
}
+static void primelockdep(struct xe_ggtt *ggtt)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&ggtt->lock);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
/**
* xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
* @tile: &xe_tile
@@ -169,9 +192,19 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
*/
struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
{
- struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL);
- if (ggtt)
- ggtt->tile = tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_ggtt *ggtt;
+
+ ggtt = drmm_kzalloc(&xe->drm, sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt)
+ return NULL;
+
+ if (drmm_mutex_init(&xe->drm, &ggtt->lock))
+ return NULL;
+
+ primelockdep(ggtt);
+ ggtt->tile = tile;
+
return ggtt;
}
@@ -180,7 +213,6 @@ static void ggtt_fini_early(struct drm_device *drm, void *arg)
struct xe_ggtt *ggtt = arg;
destroy_workqueue(ggtt->wq);
- mutex_destroy(&ggtt->lock);
drm_mm_takedown(&ggtt->mm);
}
@@ -198,16 +230,6 @@ void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
}
#endif
-static void primelockdep(struct xe_ggtt *ggtt)
-{
- if (!IS_ENABLED(CONFIG_LOCKDEP))
- return;
-
- fs_reclaim_acquire(GFP_KERNEL);
- might_lock(&ggtt->lock);
- fs_reclaim_release(GFP_KERNEL);
-}
-
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
.pte_encode_flags = xelp_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
@@ -227,8 +249,6 @@ static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
{
drm_mm_init(&ggtt->mm, reserved,
ggtt->size - reserved);
- mutex_init(&ggtt->lock);
- primelockdep(ggtt);
}
int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
@@ -284,10 +304,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->size = GUC_GGTT_TOP;
if (GRAPHICS_VERx100(xe) >= 1270)
- ggtt->pt_ops = (ggtt->tile->media_gt &&
- XE_GT_WA(ggtt->tile->media_gt, 22019338487)) ||
- XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ?
- &xelpg_pt_wa_ops : &xelpg_pt_ops;
+ ggtt->pt_ops =
+ (ggtt->tile->media_gt && XE_GT_WA(ggtt->tile->media_gt, 22019338487)) ||
+ (ggtt->tile->primary_gt && XE_GT_WA(ggtt->tile->primary_gt, 22019338487)) ?
+ &xelpg_pt_wa_ops : &xelpg_pt_ops;
else
ggtt->pt_ops = &xelp_pt_ops;
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index 455ccaf17314..f91e06d03511 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -101,19 +101,6 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
cancel_work_sync(&sched->work_process_msg);
}
-/**
- * xe_sched_submission_stop_async - Stop further runs of submission tasks on a scheduler.
- * @sched: the &xe_gpu_scheduler struct instance
- *
- * This call disables further runs of scheduling work queue. It does not wait
- * for any in-progress runs to finish, only makes sure no further runs happen
- * afterwards.
- */
-void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched)
-{
- drm_sched_wqueue_stop(&sched->base);
-}
-
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
{
drm_sched_resume_timeout(&sched->base, sched->base.timeout);
@@ -135,3 +122,17 @@ void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
list_add_tail(&msg->link, &sched->msgs);
xe_sched_process_msg_queue(sched);
}
+
+/**
+ * xe_sched_add_msg_head() - Xe GPU scheduler add message to head of list
+ * @sched: Xe GPU scheduler
+ * @msg: Message to add
+ */
+void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg)
+{
+ lockdep_assert_held(&sched->base.job_list_lock);
+
+ list_add(&msg->link, &sched->msgs);
+ xe_sched_process_msg_queue(sched);
+}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index e548b2aed95a..9955397aaaa9 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -7,7 +7,7 @@
#define _XE_GPU_SCHEDULER_H_
#include "xe_gpu_scheduler_types.h"
-#include "xe_sched_job_types.h"
+#include "xe_sched_job.h"
int xe_sched_init(struct xe_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
@@ -21,7 +21,6 @@ void xe_sched_fini(struct xe_gpu_scheduler *sched);
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
-void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched);
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
@@ -29,6 +28,8 @@ void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg);
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg);
+void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg);
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
{
@@ -58,7 +59,8 @@ static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *hw_fence = s_fence->parent;
- if (hw_fence && !dma_fence_is_signaled(hw_fence))
+ if (to_xe_sched_job(s_job)->skip_emit ||
+ (hw_fence && !dma_fence_is_signaled(hw_fence)))
sched->base.ops->run_job(s_job);
}
}
@@ -77,17 +79,30 @@ static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
spin_unlock(&sched->base.job_list_lock);
}
+/**
+ * xe_sched_first_pending_job() - Find the first unsignaled pending job
+ * @sched: Xe GPU scheduler
+ *
+ * Return: the first unsignaled job in the pending list, or NULL
+ */
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
- struct xe_sched_job *job;
+ struct xe_sched_job *job, *r_job = NULL;
spin_lock(&sched->base.job_list_lock);
- job = list_first_entry_or_null(&sched->base.pending_list,
- struct xe_sched_job, drm.list);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ struct drm_sched_fence *s_fence = job->drm.s_fence;
+ struct dma_fence *hw_fence = s_fence->parent;
+
+ if (hw_fence && !dma_fence_is_signaled(hw_fence)) {
+ r_job = job;
+ break;
+ }
+ }
spin_unlock(&sched->base.job_list_lock);
- return job;
+ return r_job;
}
static inline int
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 83d61bf8ec62..dd69cb834f8e 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -266,7 +266,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
unsigned int fw_ref;
int ret;
- if (XE_GT_WA(tile->primary_gt, 14018094691)) {
+ if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 14018094691)) {
fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
/*
@@ -281,7 +281,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
ret = gsc_upload(gsc);
- if (XE_GT_WA(tile->primary_gt, 14018094691))
+ if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 14018094691))
xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);
if (ret)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 3e0ad7e5b5df..d8e94fb8b9bd 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -65,29 +65,29 @@
#include "xe_wa.h"
#include "xe_wopcm.h"
-static void gt_fini(struct drm_device *drm, void *arg)
-{
- struct xe_gt *gt = arg;
-
- destroy_workqueue(gt->ordered_wq);
-}
-
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct drm_device *drm = &xe->drm;
+ bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt &&
+ IS_SRIOV_VF(xe);
+ struct workqueue_struct *ordered_wq;
struct xe_gt *gt;
- int err;
- gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
+ gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL);
if (!gt)
return ERR_PTR(-ENOMEM);
gt->tile = tile;
- gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq",
- WQ_MEM_RECLAIM);
+ if (shared_wq && tile->primary_gt->ordered_wq)
+ ordered_wq = tile->primary_gt->ordered_wq;
+ else
+ ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq",
+ WQ_MEM_RECLAIM);
+ if (IS_ERR(ordered_wq))
+ return ERR_CAST(ordered_wq);
- err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
- if (err)
- return ERR_PTR(err);
+ gt->ordered_wq = ordered_wq;
return gt;
}
@@ -398,6 +398,12 @@ int xe_gt_init_early(struct xe_gt *gt)
return err;
}
+ if (IS_SRIOV_VF(gt_to_xe(gt))) {
+ err = xe_gt_sriov_vf_init_early(gt);
+ if (err)
+ return err;
+ }
+
xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
err = xe_wa_gt_init(gt);
@@ -583,10 +589,8 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
- if (IS_SRIOV_PF(gt_to_xe(gt))) {
- xe_gt_sriov_pf_init(gt);
+ if (IS_SRIOV_PF(gt_to_xe(gt)))
xe_gt_sriov_pf_init_hw(gt);
- }
xe_force_wake_put(gt_to_fw(gt), fw_ref);
@@ -657,6 +661,12 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
+ if (IS_SRIOV_VF(gt_to_xe(gt))) {
+ err = xe_gt_sriov_vf_init(gt);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -803,11 +813,6 @@ static int do_gt_restart(struct xe_gt *gt)
return 0;
}
-static int gt_wait_reset_unblock(struct xe_gt *gt)
-{
- return xe_guc_wait_reset_unblock(&gt->uc.guc);
-}
-
static int gt_reset(struct xe_gt *gt)
{
unsigned int fw_ref;
@@ -822,10 +827,6 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_info(gt, "reset started\n");
- err = gt_wait_reset_unblock(gt);
- if (!err)
- xe_gt_warn(gt, "reset block failed to get lifted");
-
xe_pm_runtime_get(gt_to_xe(gt));
if (xe_fault_inject_gt_reset()) {
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 41880979f4de..9d710049da45 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -12,6 +12,7 @@
#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine.h"
#define for_each_hw_engine(hwe__, gt__, id__) \
@@ -21,6 +22,12 @@
#define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)
+#define GT_VER(gt) ({ \
+ typeof(gt) gt_ = (gt); \
+ struct xe_device *xe = gt_to_xe(gt_); \
+ xe_gt_is_media_type(gt_) ? MEDIA_VER(xe) : GRAPHICS_VER(xe); \
+})
+
extern struct fault_attr gt_reset_failure;
static inline bool xe_fault_inject_gt_reset(void)
{
@@ -124,4 +131,16 @@ static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
hwe->instance == gt->usm.reserved_bcs_instance;
}
+/**
+ * xe_gt_recovery_pending() - Check if GT recovery is pending
+ * @gt: the &xe_gt
+ *
+ * Return: true if GT recovery is pending, false otherwise
+ */
+static inline bool xe_gt_recovery_pending(struct xe_gt *gt)
+{
+ return IS_SRIOV_VF(gt_to_xe(gt)) &&
+ xe_gt_sriov_vf_recovery_pending(gt);
+}
+
#endif
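[Editor's note] A short, hypothetical illustration of the new helper in a submission-style path (the function and its return policy are illustrative, not part of the patch):

    static int submit_if_not_recovering(struct xe_gt *gt)
    {
            /* Bail out while VF post-migration recovery is still in flight. */
            if (xe_gt_recovery_pending(gt))
                    return -EAGAIN;

            /* ... proceed with normal submission ... */
            return 0;
    }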
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
index 4f011d1573c6..00f5972c14dc 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.c
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -55,30 +55,11 @@ static void read_crystal_clock(struct xe_gt *gt, u32 rpm_config_reg, u32 *freq,
}
}
-static void check_ctc_mode(struct xe_gt *gt)
-{
- /*
- * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later
- * platforms. In theory it could be a valid setting for pre-Xe2
- * platforms, but there's no documentation on how to properly handle
- * this case. Reading TIMESTAMP_OVERRIDE, as the driver attempted in
- * the past has been confirmed as incorrect by the hardware architects.
- *
- * For now just warn if we ever encounter hardware in the wild that
- * has this setting and move on as if it hadn't been set.
- */
- if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC)
- xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n");
-}
-
int xe_gt_clock_init(struct xe_gt *gt)
{
u32 freq;
u32 c0;
- if (!IS_SRIOV_VF(gt_to_xe(gt)))
- check_ctc_mode(gt);
-
c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
read_crystal_clock(gt, c0, &freq, &gt->info.timestamp_base);
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index f253e2df4907..e4fd632f43cf 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -12,7 +12,6 @@
#include "xe_device.h"
#include "xe_force_wake.h"
-#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_idle.h"
@@ -36,6 +35,11 @@
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
+static struct xe_gt *node_to_gt(struct drm_info_node *node)
+{
+ return node->dent->d_parent->d_inode->i_private;
+}
+
/**
* xe_gt_debugfs_simple_show - A show callback for struct drm_info_list
* @m: the &seq_file
@@ -78,8 +82,7 @@ int xe_gt_debugfs_simple_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
struct drm_info_node *node = m->private;
- struct dentry *parent = node->dent->d_parent;
- struct xe_gt *gt = parent->d_inode->i_private;
+ struct xe_gt *gt = node_to_gt(node);
int (*print)(struct xe_gt *, struct drm_printer *) = node->info_ent->data;
if (WARN_ON(!print))
@@ -88,15 +91,36 @@ int xe_gt_debugfs_simple_show(struct seq_file *m, void *data)
return print(gt, &p);
}
-static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_gt_debugfs_show_with_rpm - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * Similar to xe_gt_debugfs_simple_show() but implicitly takes an RPM ref.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_debugfs_show_with_rpm(struct seq_file *m, void *data)
{
+ struct drm_info_node *node = m->private;
+ struct xe_gt *gt = node_to_gt(node);
struct xe_device *xe = gt_to_xe(gt);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_gt_debugfs_simple_show(m, data);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
+{
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
unsigned int fw_ref;
int ret = 0;
- xe_pm_runtime_get(xe);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
ret = -ETIMEDOUT;
@@ -108,58 +132,21 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
fw_put:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
- xe_pm_runtime_put(xe);
-
- return ret;
-}
-
-static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
-{
- int ret;
-
- xe_pm_runtime_get(gt_to_xe(gt));
- ret = xe_gt_idle_pg_print(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
return ret;
}
-static int topology(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_topology_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
static int steering(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_mcr_steering_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
-static int ggtt(struct xe_gt *gt, struct drm_printer *p)
-{
- int ret;
-
- xe_pm_runtime_get(gt_to_xe(gt));
- ret = xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return ret;
-}
-
static int register_save_restore(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
- xe_pm_runtime_get(gt_to_xe(gt));
-
xe_reg_sr_dump(&gt->reg_sr, p);
drm_printf(p, "\n");
@@ -177,98 +164,42 @@ static int register_save_restore(struct xe_gt *gt, struct drm_printer *p)
for_each_hw_engine(hwe, gt, id)
xe_reg_whitelist_dump(&hwe->reg_whitelist, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int workarounds(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_wa_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int tunings(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_tuning_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int pat(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_pat_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int mocs(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_mocs_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int rcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_RENDER);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int ccs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_COMPUTE);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int bcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_COPY);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int vcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_DECODE);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int vecs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_guc_hwconfig_dump(&gt->uc.guc, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
@@ -278,26 +209,26 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
* - without access to the PF specific data
*/
static const struct drm_info_list vf_safe_debugfs_list[] = {
- {"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
- {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
- {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
- {"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
- {"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
- {"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
- {"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc},
- {"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
- {"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc},
- {"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc},
- {"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
+ { "topology", .show = xe_gt_debugfs_show_with_rpm, .data = xe_gt_topology_dump },
+ { "register-save-restore",
+ .show = xe_gt_debugfs_show_with_rpm, .data = register_save_restore },
+ { "workarounds", .show = xe_gt_debugfs_show_with_rpm, .data = xe_wa_gt_dump },
+ { "tunings", .show = xe_gt_debugfs_show_with_rpm, .data = xe_tuning_dump },
+ { "default_lrc_rcs", .show = xe_gt_debugfs_show_with_rpm, .data = rcs_default_lrc },
+ { "default_lrc_ccs", .show = xe_gt_debugfs_show_with_rpm, .data = ccs_default_lrc },
+ { "default_lrc_bcs", .show = xe_gt_debugfs_show_with_rpm, .data = bcs_default_lrc },
+ { "default_lrc_vcs", .show = xe_gt_debugfs_show_with_rpm, .data = vcs_default_lrc },
+ { "default_lrc_vecs", .show = xe_gt_debugfs_show_with_rpm, .data = vecs_default_lrc },
+ { "hwconfig", .show = xe_gt_debugfs_show_with_rpm, .data = hwconfig },
};
/* everything else should be added here */
static const struct drm_info_list pf_only_debugfs_list[] = {
- {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
- {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
- {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
- {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
- {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
+ { "hw_engines", .show = xe_gt_debugfs_show_with_rpm, .data = hw_engines },
+ { "mocs", .show = xe_gt_debugfs_show_with_rpm, .data = xe_mocs_dump },
+ { "pat", .show = xe_gt_debugfs_show_with_rpm, .data = xe_pat_dump },
+ { "powergate_info", .show = xe_gt_debugfs_show_with_rpm, .data = xe_gt_idle_pg_print },
+ { "steering", .show = xe_gt_debugfs_show_with_rpm, .data = steering },
};
static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t *ppos,
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.h b/drivers/gpu/drm/xe/xe_gt_debugfs.h
index 05a6cc93c78c..32ee3264051b 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.h
@@ -11,5 +11,6 @@ struct xe_gt;
void xe_gt_debugfs_register(struct xe_gt *gt);
int xe_gt_debugfs_simple_show(struct seq_file *m, void *data);
+int xe_gt_debugfs_show_with_rpm(struct seq_file *m, void *data);
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 4ff1b6b58d6b..701349251bbc 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -99,13 +99,8 @@ static ssize_t rp0_freq_show(struct kobject *kobj,
{
struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
- u32 freq;
- xe_pm_runtime_get(dev_to_xe(dev));
- freq = xe_guc_pc_get_rp0_freq(pc);
- xe_pm_runtime_put(dev_to_xe(dev));
-
- return sysfs_emit(buf, "%d\n", freq);
+ return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rp0_freq(pc));
}
static struct kobj_attribute attr_rp0_freq = __ATTR_RO(rp0_freq);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 8fb1cae91724..81ecd9382635 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -169,6 +169,15 @@ static const struct xe_mmio_range xelpg_dss_steering_table[] = {
{},
};
+static const struct xe_mmio_range xe3p_xpc_xecore_steering_table[] = {
+ { 0x008140, 0x00817F }, /* SLICE, XeCore, SLICE */
+ { 0x009480, 0x00955F }, /* SLICE, XeCore */
+ { 0x00D800, 0x00D87F }, /* SLICE */
+ { 0x00DC00, 0x00E9FF }, /* SLICE, rsvd, XeCore, rsvd, XeCore, rsvd, XeCore */
+ { 0x013000, 0x0135FF }, /* XeCore, SLICE */
+ {},
+};
+
static const struct xe_mmio_range xelpmp_oaddrm_steering_table[] = {
{ 0x393200, 0x39323F },
{ 0x393400, 0x3934FF },
@@ -236,16 +245,41 @@ static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = {
};
static const struct xe_mmio_range xe3lpm_instance0_steering_table[] = {
- { 0x384000, 0x3847DF }, /* GAM, rsvd, GAM */
+ { 0x384000, 0x3841FF }, /* GAM */
+ { 0x384400, 0x3847DF }, /* GAM */
{ 0x384900, 0x384AFF }, /* GAM */
{ 0x389560, 0x3895FF }, /* MEDIAINF */
{ 0x38B600, 0x38B8FF }, /* L3BANK */
{ 0x38C800, 0x38D07F }, /* GAM, MEDIAINF */
- { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, GAM */
+ { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, rsvd, GAM */
{ 0x393C00, 0x393C7F }, /* MEDIAINF */
{},
};
+/*
+ * Different "GAM" ranges have different rules; GAMWKRS, STLB, and GAMREQSTRM
+ * range subtypes need to be steered to (1,0), while all other GAM subtypes
+ * are steered to (0,0) and are included in the "INSTANCE0" table farther
+ * down.
+ */
+static const struct xe_mmio_range xe3p_xpc_gam_grp1_steering_table[] = {
+ { 0x004000, 0x004AFF }, /* GAMREQSTRM, rsvd, STLB, GAMWKRS, GAMREQSTRM */
+ { 0x00F100, 0x00FFFF }, /* GAMWKRS */
+ {},
+};
+
+static const struct xe_mmio_range xe3p_xpc_psmi_grp19_steering_table[] = {
+ { 0x00B500, 0x00B5FF },
+ {},
+};
+
+static const struct xe_mmio_range xe3p_xpc_instance0_steering_table[] = {
+ { 0x00B600, 0x00B6FF }, /* PSMI0 */
+ { 0x00C800, 0x00CFFF }, /* GAMCTRL */
+ { 0x00F000, 0x00F0FF }, /* GAMCTRL */
+ {},
+};
+
static void init_steering_l3bank(struct xe_gt *gt)
{
struct xe_mmio *mmio = &gt->mmio;
@@ -418,6 +452,18 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
}
+static void init_steering_psmi(struct xe_gt *gt)
+{
+ gt->steering[PSMI19].group_target = 19;
+ gt->steering[PSMI19].instance_target = 0;
+}
+
+static void init_steering_gam1(struct xe_gt *gt)
+{
+ gt->steering[GAM1].group_target = 1;
+ gt->steering[GAM1].instance_target = 0;
+}
+
static const struct {
const char *name;
void (*init)(struct xe_gt *gt);
@@ -425,9 +471,11 @@ static const struct {
[L3BANK] = { "L3BANK", init_steering_l3bank },
[MSLICE] = { "MSLICE", init_steering_mslice },
[LNCF] = { "LNCF", NULL }, /* initialized by mslice init */
- [DSS] = { "DSS", init_steering_dss },
+ [DSS] = { "DSS / XeCore", init_steering_dss },
[OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm },
[SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi },
+ [PSMI19] = { "PSMI[19]", init_steering_psmi },
+ [GAM1] = { "GAMWKRS / STLB / GAMREQSTRM", init_steering_gam1 },
[INSTANCE0] = { "INSTANCE 0", NULL },
[IMPLICIT_STEERING] = { "IMPLICIT", NULL },
};
@@ -466,7 +514,18 @@ void xe_gt_mcr_init_early(struct xe_gt *gt)
gt->steering[OADDRM].ranges = xelpmp_oaddrm_steering_table;
}
} else {
- if (GRAPHICS_VER(xe) >= 20) {
+ if (GRAPHICS_VERx100(xe) == 3511) {
+ /*
+ * TODO: there are some ranges in bspec with missing
+ * termination: [0x00B000, 0x00B0FF] and
+ * [0x00D880, 0x00D8FF] (NODE); [0x00B100, 0x00B3FF]
+ * (L3BANK). Update them here once bspec is updated.
+ */
+ gt->steering[DSS].ranges = xe3p_xpc_xecore_steering_table;
+ gt->steering[GAM1].ranges = xe3p_xpc_gam_grp1_steering_table;
+ gt->steering[INSTANCE0].ranges = xe3p_xpc_instance0_steering_table;
+ gt->steering[PSMI19].ranges = xe3p_xpc_psmi_grp19_steering_table;
+ } else if (GRAPHICS_VER(xe) >= 20) {
gt->steering[DSS].ranges = xe2lpg_dss_steering_table;
gt->steering[SQIDI_PSMI].ranges = xe2lpg_sqidi_psmi_steering_table;
gt->steering[INSTANCE0].ranges = xe2lpg_instance0_steering_table;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 6344b5205c08..c0c0215c0703 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1484,7 +1484,8 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_NEEDS_2M |
XE_BO_FLAG_PINNED |
- XE_BO_FLAG_PINNED_LATE_RESTORE);
+ XE_BO_FLAG_PINNED_LATE_RESTORE |
+ XE_BO_FLAG_FORCE_USER_VRAM);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1547,7 +1548,8 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
- xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));
+ if (!xe_device_has_lmtt(gt_to_xe(gt)))
+ return -EPERM;
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 4f7fff892bc0..2e6bd3d1fe1d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -18,6 +18,7 @@
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf_control.h"
#include "xe_sriov_pf_service.h"
#include "xe_tile.h"
@@ -170,6 +171,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
CASE2STR(FLR_SEND_START);
CASE2STR(FLR_WAIT_GUC);
CASE2STR(FLR_GUC_DONE);
+ CASE2STR(FLR_SYNC);
CASE2STR(FLR_RESET_CONFIG);
CASE2STR(FLR_RESET_DATA);
CASE2STR(FLR_RESET_MMIO);
@@ -271,12 +273,19 @@ static bool pf_expect_vf_not_state(struct xe_gt *gt, unsigned int vfid,
return result;
}
+static void pf_track_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit,
+ const char *what)
+{
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) %s\n",
+ vfid, control_bit_to_string(bit), bit, what);
+}
+
static bool pf_enter_vf_state(struct xe_gt *gt, unsigned int vfid,
enum xe_gt_sriov_control_bits bit)
{
if (!test_and_set_bit(bit, pf_peek_vf_state(gt, vfid))) {
- xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) enter\n",
- vfid, control_bit_to_string(bit), bit);
+ pf_track_vf_state(gt, vfid, bit, "enter");
return true;
}
return false;
@@ -286,8 +295,7 @@ static bool pf_exit_vf_state(struct xe_gt *gt, unsigned int vfid,
enum xe_gt_sriov_control_bits bit)
{
if (test_and_clear_bit(bit, pf_peek_vf_state(gt, vfid))) {
- xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) exit\n",
- vfid, control_bit_to_string(bit), bit);
+ pf_track_vf_state(gt, vfid, bit, "exit");
return true;
}
return false;
@@ -616,7 +624,7 @@ int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
}
if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
- xe_gt_sriov_info(gt, "VF%u paused!\n", vfid);
+ xe_gt_sriov_dbg(gt, "VF%u paused!\n", vfid);
return 0;
}
@@ -755,7 +763,7 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
return err;
if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED)) {
- xe_gt_sriov_info(gt, "VF%u resumed!\n", vfid);
+ xe_gt_sriov_dbg(gt, "VF%u resumed!\n", vfid);
return 0;
}
@@ -896,7 +904,7 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
return err;
if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
- xe_gt_sriov_info(gt, "VF%u stopped!\n", vfid);
+ xe_gt_sriov_dbg(gt, "VF%u stopped!\n", vfid);
return 0;
}
@@ -934,6 +942,10 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
* : v : | |
* : FLR_GUC_DONE : | |
* : | : | |
+ * : | o--<--sync : | |
+ * : |/ / : | |
+ * : FLR_SYNC--o : | |
+ * : | : | |
* : FLR_RESET_CONFIG---failed--->-----------o--------+-----------o
* : | : | |
* : FLR_RESET_DATA : | |
@@ -1141,12 +1153,38 @@ static bool pf_exit_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid)
return true;
}
+static bool pf_exit_vf_flr_sync(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC))
+ return false;
+
+ pf_enter_vf_flr_reset_config(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_sync(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ ret = xe_sriov_pf_control_sync_flr(gt_to_xe(gt), vfid);
+ if (ret < 0) {
+ xe_gt_sriov_dbg_verbose(gt, "FLR checkpoint %pe\n", ERR_PTR(ret));
+ pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC);
+ } else {
+ xe_gt_sriov_dbg_verbose(gt, "FLR checkpoint pass\n");
+ pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC);
+ }
+}
+
static bool pf_exit_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
{
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE))
return false;
- pf_enter_vf_flr_reset_config(gt, vfid);
+ pf_enter_vf_flr_sync(gt, vfid);
return true;
}
@@ -1167,10 +1205,52 @@ static void pf_enter_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid)
{
+ pf_enter_vf_flr_wip(gt, vfid);
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_sync_flr() - Synchronize on the VF FLR checkpoint.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @sync: if true, allow exiting the checkpoint
+ *
+ * Return: non-zero if the FLR checkpoint has been reached, zero if there is no
+ * FLR in progress, or a negative error code if the FLR is busy or has failed.
+ */
+int xe_gt_sriov_pf_control_sync_flr(struct xe_gt *gt, unsigned int vfid, bool sync)
+{
+ if (sync && pf_exit_vf_flr_sync(gt, vfid))
+ return 1;
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC))
+ return 1;
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP))
+ return -EBUSY;
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ return -EIO;
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_wait_flr() - Wait for a VF FLR to complete.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_wait_flr(struct xe_gt *gt, unsigned int vfid)
+{
unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_WIP);
int err;
- pf_enter_vf_flr_wip(gt, vfid);
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ return -EIO;
+
+ if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP))
+ return 0;
err = pf_wait_vf_wip_done(gt, vfid, timeout);
if (err) {
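[Editor's note] The sync/wait split above is per GT; a hypothetical device-level caller (assuming for_each_gt() from xe_device.h; the helper name is illustrative) could combine the per-GT checkpoint results like this:

    static int sync_flr_all_gts_sketch(struct xe_device *xe, unsigned int vfid)
    {
            struct xe_gt *gt;
            u8 id;
            int ret;

            /* First pass: confirm every GT has reached the FLR_SYNC checkpoint. */
            for_each_gt(gt, xe, id) {
                    ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, false);
                    if (ret <= 0)
                            return ret;     /* no FLR in progress, busy, or failed */
            }

            /* Second pass: release each GT from the checkpoint to continue its FLR. */
            for_each_gt(gt, xe, id) {
                    ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, true);
                    if (ret < 0)
                            return ret;
            }

            return 1;
    }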
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
index c85e64f099cc..8a72ef3778d4 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
@@ -18,6 +18,8 @@ int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_sync_flr(struct xe_gt *gt, unsigned int vfid, bool sync);
+int xe_gt_sriov_pf_control_wait_flr(struct xe_gt *gt, unsigned int vfid);
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
index f02f941b4ad2..c80b7e77f1ad 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
@@ -18,6 +18,7 @@
* @XE_GT_SRIOV_STATE_FLR_SEND_START: indicates that the PF wants to send a FLR START command.
* @XE_GT_SRIOV_STATE_FLR_WAIT_GUC: indicates that the PF awaits for a response from the GuC.
* @XE_GT_SRIOV_STATE_FLR_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_FLR_SYNC: indicates that the PF is waiting to synchronize with other GuCs.
* @XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: indicates that the PF needs to clear VF's resources.
* @XE_GT_SRIOV_STATE_FLR_RESET_DATA: indicates that the PF needs to clear VF's data.
* @XE_GT_SRIOV_STATE_FLR_RESET_MMIO: indicates that the PF needs to reset VF's registers.
@@ -47,6 +48,7 @@ enum xe_gt_sriov_control_bits {
XE_GT_SRIOV_STATE_FLR_SEND_START,
XE_GT_SRIOV_STATE_FLR_WAIT_GUC,
XE_GT_SRIOV_STATE_FLR_GUC_DONE,
+ XE_GT_SRIOV_STATE_FLR_SYNC,
XE_GT_SRIOV_STATE_FLR_RESET_CONFIG,
XE_GT_SRIOV_STATE_FLR_RESET_DATA,
XE_GT_SRIOV_STATE_FLR_RESET_MMIO,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index 3ed245e04d0c..838beb7f6327 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -23,14 +23,25 @@
#include "xe_gt_sriov_pf_service.h"
#include "xe_pm.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_provision.h"
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0 # d_inode->i_private = gt
- * │   ├── pf # d_inode->i_private = gt
- * │   ├── vf1 # d_inode->i_private = VFID(1)
- * :   :
- * │   ├── vfN # d_inode->i_private = VFID(N)
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── pf # d_inode->i_private = (xe_device*)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
+ * │ │ │ ├── gt1 # d_inode->i_private = (xe_gt*)
+ * │ │ ├── tile1
+ * │ │ │ :
+ * │ ├── vf1 # d_inode->i_private = VFID(1)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
+ * │ │ │ ├── gt1 # d_inode->i_private = (xe_gt*)
+ * │ │ ├── tile1
+ * │ │ │ :
+ * : :
+ * │ ├── vfN # d_inode->i_private = VFID(N)
*/
static void *extract_priv(struct dentry *d)
@@ -40,26 +51,31 @@ static void *extract_priv(struct dentry *d)
static struct xe_gt *extract_gt(struct dentry *d)
{
- return extract_priv(d->d_parent);
+ return extract_priv(d);
+}
+
+static struct xe_device *extract_xe(struct dentry *d)
+{
+ return extract_priv(d->d_parent->d_parent->d_parent);
}
static unsigned int extract_vfid(struct dentry *d)
{
- return extract_priv(d) == extract_gt(d) ? PFID : (uintptr_t)extract_priv(d);
+ void *priv = extract_priv(d->d_parent->d_parent);
+
+ return priv == extract_xe(d) ? PFID : (uintptr_t)priv;
}
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── contexts_provisioned
- * │   │   ├── doorbells_provisioned
- * │   │   ├── runtime_registers
- * │   │   ├── negotiated_versions
- * │   │   ├── adverse_events
- * ├── gt1
- * │   ├── pf
- * │   │   ├── ...
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * : ├── tile0
+ * : ├── gt0
+ * : ├── contexts_provisioned
+ * ├── doorbells_provisioned
+ * ├── runtime_registers
+ * ├── adverse_events
*/
static const struct drm_info_list pf_info[] = {
@@ -86,48 +102,14 @@ static const struct drm_info_list pf_info[] = {
};
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── ggtt_available
- * │   │   ├── ggtt_provisioned
- */
-
-static const struct drm_info_list pf_ggtt_info[] = {
- {
- "ggtt_available",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_available_ggtt,
- },
- {
- "ggtt_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_ggtt,
- },
-};
-
-/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── lmem_provisioned
- */
-
-static const struct drm_info_list pf_lmem_info[] = {
- {
- "lmem_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_lmem,
- },
-};
-
-/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── reset_engine
- * │   │   ├── sample_period
- * │   │   ├── sched_if_idle
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * : ├── tile0
+ * : ├── gt0
+ * : ├── reset_engine
+ * ├── sample_period
+ * ├── sched_if_idle
*/
#define DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(POLICY, TYPE, FORMAT) \
@@ -143,6 +125,8 @@ static int POLICY##_set(void *data, u64 val) \
\
xe_pm_runtime_get(xe); \
err = xe_gt_sriov_pf_policy_set_##POLICY(gt, val); \
+ if (!err) \
+ xe_sriov_pf_provision_set_custom_mode(xe); \
xe_pm_runtime_put(xe); \
\
return err; \
@@ -173,24 +157,24 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
}
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── ggtt_spare
- * │   │   ├── lmem_spare
- * │   │   ├── doorbells_spare
- * │   │   ├── contexts_spare
- * │   │   ├── exec_quantum_ms
- * │   │   ├── preempt_timeout_us
- * │   │   ├── sched_priority
- * │   ├── vf1
- * │   │   ├── ggtt_quota
- * │   │   ├── lmem_quota
- * │   │   ├── doorbells_quota
- * │   │   ├── contexts_quota
- * │   │   ├── exec_quantum_ms
- * │   │   ├── preempt_timeout_us
- * │   │   ├── sched_priority
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * :   ├── pf
+ *     │   ├── tile0
+ *     │   :   ├── gt0
+ *     │   :       ├── doorbells_spare
+ *     │           ├── contexts_spare
+ *     │           ├── exec_quantum_ms
+ *     │           ├── preempt_timeout_us
+ *     │           ├── sched_priority
+ *     ├── vf1
+ *     :   ├── tile0
+ *     :       ├── gt0
+ *     :           ├── doorbells_quota
+ *                 ├── contexts_quota
+ *                 ├── exec_quantum_ms
+ *                 ├── preempt_timeout_us
+ *                 ├── sched_priority
*/
#define DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(CONFIG, TYPE, FORMAT) \
@@ -208,6 +192,8 @@ static int CONFIG##_set(void *data, u64 val) \
xe_pm_runtime_get(xe); \
err = xe_sriov_pf_wait_ready(xe) ?: \
xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
+ if (!err) \
+ xe_sriov_pf_provision_set_custom_mode(xe); \
xe_pm_runtime_put(xe); \
\
return err; \
@@ -224,8 +210,6 @@ static int CONFIG##_get(void *data, u64 *val) \
\
DEFINE_DEBUGFS_ATTRIBUTE(CONFIG##_fops, CONFIG##_get, CONFIG##_set, FORMAT)
-DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ggtt, u64, "%llu\n");
-DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(lmem, u64, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ctxs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(dbs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(exec_quantum, u32, "%llu\n");
@@ -233,22 +217,26 @@ DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(preempt_timeout, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(sched_priority, u32, "%llu\n");
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── threshold_cat_error_count
- * │   │   ├── threshold_doorbell_time_us
- * │   │   ├── threshold_engine_reset_count
- * │   │   ├── threshold_guc_time_us
- * │   │   ├── threshold_irq_time_us
- * │   │   ├── threshold_page_fault_count
- * │   ├── vf1
- * │   │   ├── threshold_cat_error_count
- * │   │   ├── threshold_doorbell_time_us
- * │   │   ├── threshold_engine_reset_count
- * │   │   ├── threshold_guc_time_us
- * │   │   ├── threshold_irq_time_us
- * │   │   ├── threshold_page_fault_count
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * :   ├── pf
+ *     │   ├── tile0
+ *     │   :   ├── gt0
+ *     │   :       ├── threshold_cat_error_count
+ *     │           ├── threshold_doorbell_time_us
+ *     │           ├── threshold_engine_reset_count
+ *     │           ├── threshold_guc_time_us
+ *     │           ├── threshold_irq_time_us
+ *     │           ├── threshold_page_fault_count
+ *     ├── vf1
+ *     :   ├── tile0
+ *     :       ├── gt0
+ *     :           ├── threshold_cat_error_count
+ *                 ├── threshold_doorbell_time_us
+ *                 ├── threshold_engine_reset_count
+ *                 ├── threshold_guc_time_us
+ *                 ├── threshold_irq_time_us
+ *                 ├── threshold_page_fault_count
*/
static int set_threshold(void *data, u64 val, enum xe_guc_klv_threshold_index index)
@@ -263,6 +251,8 @@ static int set_threshold(void *data, u64 val, enum xe_guc_klv_threshold_index in
xe_pm_runtime_get(xe);
err = xe_gt_sriov_pf_config_set_threshold(gt, vfid, index, val);
+ if (!err)
+ xe_sriov_pf_provision_set_custom_mode(xe);
xe_pm_runtime_put(xe);
return err;
@@ -302,13 +292,6 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
xe_gt_assert(gt, gt == extract_gt(parent));
xe_gt_assert(gt, vfid == extract_vfid(parent));
- if (xe_gt_is_main_type(gt)) {
- debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare",
- 0644, parent, parent, &ggtt_fops);
- if (xe_device_has_lmtt(gt_to_xe(gt)))
- debugfs_create_file_unsafe(vfid ? "lmem_quota" : "lmem_spare",
- 0644, parent, parent, &lmem_fops);
- }
debugfs_create_file_unsafe(vfid ? "doorbells_quota" : "doorbells_spare",
0644, parent, parent, &dbs_fops);
debugfs_create_file_unsafe(vfid ? "contexts_quota" : "contexts_spare",
@@ -329,10 +312,12 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
}
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── vf1
- * │   │   ├── control { stop, pause, resume }
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * :   ├── vf1
+ * :       ├── tile0
+ * :           ├── gt0
+ * :               ├── control { stop, pause, resume }
*/
static const struct {
@@ -409,11 +394,14 @@ static const struct file_operations control_ops = {
};
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── vf1
- * │   │   ├── guc_state
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * :   ├── vf1
+ * :       ├── tile0
+ * :           ├── gt0
+ * :               ├── guc_state
*/
+
static ssize_t guc_state_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
@@ -447,20 +435,27 @@ static const struct file_operations guc_state_ops = {
};
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── vf1
- * │   │   ├── config_blob
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * :   ├── vf1
+ * :       ├── tile0
+ * :           ├── gt0
+ * :               ├── config_blob
*/
-static ssize_t config_blob_read(struct file *file, char __user *buf,
- size_t count, loff_t *pos)
+
+struct config_blob_data {
+ size_t size;
+ u8 blob[];
+};
+
+static int config_blob_open(struct inode *inode, struct file *file)
{
struct dentry *dent = file_dentry(file);
struct dentry *parent = dent->d_parent;
struct xe_gt *gt = extract_gt(parent);
unsigned int vfid = extract_vfid(parent);
+ struct config_blob_data *cbd;
ssize_t ret;
- void *tmp;
ret = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
if (!ret)
@@ -468,16 +463,27 @@ static ssize_t config_blob_read(struct file *file, char __user *buf,
if (ret < 0)
return ret;
- tmp = kzalloc(ret, GFP_KERNEL);
- if (!tmp)
+ cbd = kzalloc(struct_size(cbd, blob, ret), GFP_KERNEL);
+ if (!cbd)
return -ENOMEM;
- ret = xe_gt_sriov_pf_config_save(gt, vfid, tmp, ret);
- if (ret > 0)
- ret = simple_read_from_buffer(buf, count, pos, tmp, ret);
+ ret = xe_gt_sriov_pf_config_save(gt, vfid, cbd->blob, ret);
+ if (ret < 0) {
+ kfree(cbd);
+ return ret;
+ }
- kfree(tmp);
- return ret;
+ cbd->size = ret;
+ file->private_data = cbd;
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t config_blob_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct config_blob_data *cbd = file->private_data;
+
+ return simple_read_from_buffer(buf, count, pos, cbd->blob, cbd->size);
}
static ssize_t config_blob_write(struct file *file, const char __user *buf,
@@ -514,80 +520,150 @@ static ssize_t config_blob_write(struct file *file, const char __user *buf,
return ret;
}
+static int config_blob_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
static const struct file_operations config_blob_ops = {
.owner = THIS_MODULE,
+ .open = config_blob_open,
.read = config_blob_read,
.write = config_blob_write,
- .llseek = default_llseek,
+ .release = config_blob_release,
};
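For reference, a minimal sketch of the snapshot-at-open pattern that the new config_blob fops above follow: the blob is captured once in open(), served by read() through simple_read_from_buffer(), and freed in release(). The names and the snapshot_fill() helper below are illustrative stand-ins, not part of this patch:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct snap_data {
		size_t size;
		u8 blob[];			/* flexible array sized at open time */
	};

	/* hypothetical stand-in for xe_gt_sriov_pf_config_save() */
	static ssize_t snapshot_fill(void *buf, size_t len);

	static int snap_open(struct inode *inode, struct file *file)
	{
		struct snap_data *sd;
		ssize_t len;

		len = snapshot_fill(NULL, 0);		/* query the required size */
		if (len < 0)
			return len;

		sd = kzalloc(struct_size(sd, blob, len), GFP_KERNEL);
		if (!sd)
			return -ENOMEM;

		len = snapshot_fill(sd->blob, len);	/* capture one stable snapshot */
		if (len < 0) {
			kfree(sd);
			return len;
		}

		sd->size = len;
		file->private_data = sd;
		return nonseekable_open(inode, file);
	}

	static ssize_t snap_read(struct file *file, char __user *buf,
				 size_t count, loff_t *pos)
	{
		struct snap_data *sd = file->private_data;

		return simple_read_from_buffer(buf, count, pos, sd->blob, sd->size);
	}

	static int snap_release(struct inode *inode, struct file *file)
	{
		kfree(file->private_data);		/* drop the snapshot */
		return 0;
	}

	static const struct file_operations snap_ops = {
		.owner = THIS_MODULE,
		.open = snap_open,
		.read = snap_read,
		.release = snap_release,
	};

With this shape every reader sees one internally consistent blob, regardless of how the underlying VF configuration changes between read() calls.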
-/**
- * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs.
- * @gt: the &xe_gt to register
- * @root: the &dentry that represents the GT directory
- *
- * Register SR-IOV PF entries that are GT related and must be shown under GT debugfs.
- */
-void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
+static void pf_add_compat_attrs(struct xe_gt *gt, struct dentry *dent, unsigned int vfid)
{
struct xe_device *xe = gt_to_xe(gt);
- struct drm_minor *minor = xe->drm.primary;
- int n, totalvfs = xe_sriov_pf_get_totalvfs(xe);
- struct dentry *pfdentry;
- struct dentry *vfdentry;
- char buf[14]; /* should be enough up to "vf%u\0" for 2^32 - 1 */
-
- xe_gt_assert(gt, IS_SRIOV_PF(xe));
- xe_gt_assert(gt, root->d_inode->i_private == gt);
- /*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- */
- pfdentry = debugfs_create_dir("pf", root);
- if (IS_ERR(pfdentry))
+ if (!xe_gt_is_main_type(gt))
return;
- pfdentry->d_inode->i_private = gt;
-
- drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
- if (xe_gt_is_main_type(gt)) {
- drm_debugfs_create_files(pf_ggtt_info,
- ARRAY_SIZE(pf_ggtt_info),
- pfdentry, minor);
- if (xe_device_has_lmtt(gt_to_xe(gt)))
- drm_debugfs_create_files(pf_lmem_info,
- ARRAY_SIZE(pf_lmem_info),
- pfdentry, minor);
+
+ if (vfid) {
+ debugfs_create_symlink("ggtt_quota", dent, "../ggtt_quota");
+ if (xe_device_has_lmtt(xe))
+ debugfs_create_symlink("lmem_quota", dent, "../vram_quota");
+ } else {
+ debugfs_create_symlink("ggtt_spare", dent, "../ggtt_spare");
+ debugfs_create_symlink("ggtt_available", dent, "../ggtt_available");
+ debugfs_create_symlink("ggtt_provisioned", dent, "../ggtt_provisioned");
+ if (xe_device_has_lmtt(xe)) {
+ debugfs_create_symlink("lmem_spare", dent, "../vram_spare");
+ debugfs_create_symlink("lmem_provisioned", dent, "../vram_provisioned");
+ }
}
+}
- pf_add_policy_attrs(gt, pfdentry);
- pf_add_config_attrs(gt, pfdentry, PFID);
-
- for (n = 1; n <= totalvfs; n++) {
- /*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── vf1
- * │   ├── vf2
- */
- snprintf(buf, sizeof(buf), "vf%u", n);
- vfdentry = debugfs_create_dir(buf, root);
- if (IS_ERR(vfdentry))
- break;
- vfdentry->d_inode->i_private = (void *)(uintptr_t)n;
+static void pf_populate_gt(struct xe_gt *gt, struct dentry *dent, unsigned int vfid)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct drm_minor *minor = xe->drm.primary;
- pf_add_config_attrs(gt, vfdentry, VFID(n));
- debugfs_create_file("control", 0600, vfdentry, NULL, &control_ops);
+ if (vfid) {
+ pf_add_config_attrs(gt, dent, vfid);
+
+ debugfs_create_file("control", 0600, dent, NULL, &control_ops);
/* for testing/debugging purposes only! */
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
debugfs_create_file("guc_state",
IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
- vfdentry, NULL, &guc_state_ops);
+ dent, NULL, &guc_state_ops);
debugfs_create_file("config_blob",
IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
- vfdentry, NULL, &config_blob_ops);
+ dent, NULL, &config_blob_ops);
}
+
+ } else {
+ pf_add_config_attrs(gt, dent, PFID);
+ pf_add_policy_attrs(gt, dent);
+
+ drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), dent, minor);
+ }
+
+ /* for backward compatibility only */
+ pf_add_compat_attrs(gt, dent, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_debugfs_populate() - Create SR-IOV GT-level debugfs directories and files.
+ * @gt: the &xe_gt to register
+ * @parent: the parent &dentry that represents a &xe_tile
+ * @vfid: the VF identifier
+ *
+ * Add a new debugfs directory under @parent to represent @gt and populate it
+ * with the GT files related to the SR-IOV @vfid function.
+ *
+ * This function can only be called on PF.
+ */
+void xe_gt_sriov_pf_debugfs_populate(struct xe_gt *gt, struct dentry *parent, unsigned int vfid)
+{
+ struct dentry *dent;
+ char name[8]; /* should be enough up to "gt%u\0" for 2^8 - 1 */
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, extract_priv(parent) == gt->tile);
+ xe_gt_assert(gt, extract_priv(parent->d_parent) == gt_to_xe(gt) ||
+ (uintptr_t)extract_priv(parent->d_parent) == vfid);
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── pf
+ * │ │ ├── tile0 # parent
+ * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
+ * │ │ │ ├── gt1
+ * │ │ : :
+ * │ ├── vf1
+ * │ │ ├── tile0 # parent
+ * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
+ * │ │ │ ├── gt1
+ * │ : : :
+ */
+ snprintf(name, sizeof(name), "gt%u", gt->info.id);
+ dent = debugfs_create_dir(name, parent);
+ if (IS_ERR(dent))
+ return;
+ dent->d_inode->i_private = gt;
+
+ xe_gt_assert(gt, extract_gt(dent) == gt);
+ xe_gt_assert(gt, extract_vfid(dent) == vfid);
+
+ pf_populate_gt(gt, dent, vfid);
+}
+
+static void pf_add_links(struct xe_gt *gt, struct dentry *dent)
+{
+ unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int vfid;
+ char name[16]; /* should be more than enough for "vf%u\0" and VFID(UINT_MAX) */
+ char symlink[64]; /* should be more than enough for "../../sriov/vf%u/tile%u/gt%u\0" */
+
+ for (vfid = 0; vfid <= totalvfs; vfid++) {
+ if (vfid)
+ snprintf(name, sizeof(name), "vf%u", vfid);
+ else
+ snprintf(name, sizeof(name), "pf");
+ snprintf(symlink, sizeof(symlink), "../../sriov/%s/tile%u/gt%u",
+ name, gt->tile->id, gt->info.id);
+ debugfs_create_symlink(name, dent, symlink);
}
}
+
+/**
+ * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs.
+ * @gt: the &xe_gt to register
+ * @dent: the &dentry that represents the GT directory
+ *
+ * Instead of actual files, create symlinks for PF and each VF to their GT specific
+ * attributes that should already be exposed in the dedicated debugfs SR-IOV tree.
+ */
+void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *dent)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, dent->d_inode->i_private == gt);
+
+ pf_add_links(gt, dent);
+}
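As a worked example of the links created by pf_add_links() above (illustrative values only: a PF with totalvfs == 2, gt0 on tile0, and assuming the GT debugfs directory sits at .../dri/BDF/tile0/gt0/ so that "../../" resolves back to .../dri/BDF/):

	gt0/pf  -> ../../sriov/pf/tile0/gt0
	gt0/vf1 -> ../../sriov/vf1/tile0/gt0
	gt0/vf2 -> ../../sriov/vf2/tile0/gt0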
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h
index 038cc8ddc244..82ff3b7f0532 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h
@@ -11,6 +11,7 @@ struct dentry;
#ifdef CONFIG_PCI_IOV
void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root);
+void xe_gt_sriov_pf_debugfs_populate(struct xe_gt *gt, struct dentry *parent, unsigned int vfid);
#else
static inline void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) { }
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_printk.h b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
index 17624b16300a..d3457d608db8 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
@@ -7,10 +7,13 @@
#define _XE_GT_SRIOV_PRINTK_H_
#include "xe_gt_printk.h"
-#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_printk.h"
+
+#define __XE_GT_SRIOV_PRINTK_FMT(_gt, _fmt, ...) \
+ __XE_TILE_SRIOV_PRINTK_FMT((_gt)->tile, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
#define __xe_gt_sriov_printk(gt, _level, fmt, ...) \
- xe_gt_printk((gt), _level, "%s" fmt, xe_sriov_printk_prefix(gt_to_xe(gt)), ##__VA_ARGS__)
+ xe_sriov_##_level(gt_to_xe(gt), __XE_GT_SRIOV_PRINTK_FMT((gt), fmt, ##__VA_ARGS__))
#define xe_gt_sriov_err(_gt, _fmt, ...) \
__xe_gt_sriov_printk(_gt, err, _fmt, ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 0461d5513487..46518e629ba3 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -23,12 +23,20 @@
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sriov_vf_types.h"
#include "xe_guc.h"
+#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
+#include "xe_guc_submit.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
+#include "xe_memirq.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
+#include "xe_tile_sriov_vf.h"
+#include "xe_tlb_inval.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"
@@ -307,13 +315,13 @@ static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
}
/**
- * xe_gt_sriov_vf_notify_resfix_done - Notify GuC about resource fixups apply completed.
+ * vf_notify_resfix_done - Notify GuC that resource fixups have been applied.
* @gt: the &xe_gt struct instance linked to target GuC
*
* Returns: 0 if the operation completed successfully, or a negative error
* code otherwise.
*/
-int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
+static int vf_notify_resfix_done(struct xe_gt *gt)
{
struct xe_guc *guc = &gt->uc.guc;
int err;
@@ -433,13 +441,17 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
static int vf_get_ggtt_info(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
struct xe_guc *guc = &gt->uc.guc;
- u64 start, size;
+ u64 start, size, ggtt_size;
+ s64 shift;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ guard(mutex)(&ggtt->lock);
+
err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
if (unlikely(err))
return err;
@@ -448,28 +460,44 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
if (unlikely(err))
return err;
- if (config->ggtt_size && config->ggtt_size != size) {
+ if (!size)
+ return -ENODATA;
+
+ ggtt_size = xe_tile_sriov_vf_ggtt(tile);
+ if (ggtt_size && ggtt_size != size) {
xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
- size / SZ_1K, config->ggtt_size / SZ_1K);
+ size / SZ_1K, ggtt_size / SZ_1K);
return -EREMCHG;
}
xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
start, start + size - 1, size / SZ_1K);
- config->ggtt_shift = start - (s64)config->ggtt_base;
- config->ggtt_base = start;
- config->ggtt_size = size;
+ shift = start - (s64)xe_tile_sriov_vf_ggtt_base(tile);
+ xe_tile_sriov_vf_ggtt_base_store(tile, start);
+ xe_tile_sriov_vf_ggtt_store(tile, size);
- return config->ggtt_size ? 0 : -ENODATA;
+ if (shift && shift != start) {
+ xe_gt_sriov_info(gt, "Shifting GGTT base by %lld to 0x%016llx\n",
+ shift, start);
+ xe_tile_sriov_vf_fixup_ggtt_nodes_locked(gt_to_tile(gt), shift);
+ }
+
+ if (xe_sriov_vf_migration_supported(gt_to_xe(gt))) {
+ WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false);
+ smp_wmb(); /* Ensure above write visible before wake */
+ wake_up_all(&gt->sriov.vf.migration.wq);
+ }
+
+ return 0;
}
static int vf_get_lmem_info(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
+ struct xe_tile *tile = gt_to_tile(gt);
struct xe_guc *guc = &gt->uc.guc;
char size_str[10];
- u64 size;
+ u64 size, lmem_size;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
@@ -478,18 +506,19 @@ static int vf_get_lmem_info(struct xe_gt *gt)
if (unlikely(err))
return err;
- if (config->lmem_size && config->lmem_size != size) {
+ lmem_size = xe_tile_sriov_vf_lmem(tile);
+ if (lmem_size && lmem_size != size) {
xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
- size / SZ_1M, config->lmem_size / SZ_1M);
+ size / SZ_1M, lmem_size / SZ_1M);
return -EREMCHG;
}
string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);
- config->lmem_size = size;
+ xe_tile_sriov_vf_lmem_store(tile, size);
- return config->lmem_size ? 0 : -ENODATA;
+ return size ? 0 : -ENODATA;
}
static int vf_get_submission_cfg(struct xe_gt *gt)
@@ -540,7 +569,9 @@ static void vf_cache_gmdid(struct xe_gt *gt)
* xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
* @gt: the &xe_gt
*
- * This function is for VF use only.
+ * This function is for VF use only. It may shift the GGTT and is performed
+ * under the GGTT lock, making this step visible to all GTs that share a
+ * GGTT.
*
* Return: 0 on success or a negative error code on failure.
*/
@@ -586,75 +617,6 @@ u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
return gt->sriov.vf.self_config.num_ctxs;
}
-/**
- * xe_gt_sriov_vf_lmem - VF LMEM configuration.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: size of the LMEM assigned to VF.
- */
-u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
-{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
- xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);
-
- return gt->sriov.vf.self_config.lmem_size;
-}
-
-/**
- * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: size of the GGTT assigned to VF.
- */
-u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
-{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
- xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
-
- return gt->sriov.vf.self_config.ggtt_size;
-}
-
-/**
- * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: base offset of the GGTT assigned to VF.
- */
-u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
-{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
- xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
-
- return gt->sriov.vf.self_config.ggtt_base;
-}
-
-/**
- * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
- * @gt: the &xe_gt struct instance
- *
- * This function is for VF use only.
- *
- * Return: The shift value; could be negative
- */
-s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
-{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
-
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, xe_gt_is_main_type(gt));
-
- return config->ggtt_shift;
-}
-
static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
{
u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
@@ -755,7 +717,7 @@ failed:
* xe_gt_sriov_vf_default_lrcs_hwsp_rebase - Update GGTT references in HWSP of default LRCs.
* @gt: the &xe_gt struct instance
*/
-void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt)
+static void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt)
{
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
@@ -764,6 +726,31 @@ void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt)
xe_default_lrc_update_memirq_regs_with_address(hwe);
}
+static void vf_start_migration_recovery(struct xe_gt *gt)
+{
+ bool started;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ spin_lock(&gt->sriov.vf.migration.lock);
+
+ if (!gt->sriov.vf.migration.recovery_queued ||
+ !gt->sriov.vf.migration.recovery_teardown) {
+ gt->sriov.vf.migration.recovery_queued = true;
+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
+ WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, true);
+ smp_wmb(); /* Ensure above writes visible before wake */
+
+ xe_guc_ct_wake_waiters(&gt->uc.guc.ct);
+
+ started = queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
+ xe_gt_sriov_info(gt, "VF migration recovery %s\n", started ?
+ "scheduled" : "already in progress");
+ }
+
+ spin_unlock(&gt->sriov.vf.migration.lock);
+}
+
/**
* xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
* or just mark that a GuC is ready for it.
@@ -776,16 +763,15 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
xe_gt_assert(gt, IS_SRIOV_VF(xe));
+ xe_gt_assert(gt, xe_gt_sriov_vf_recovery_pending(gt));
- set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
- /*
- * We need to be certain that if all flags were set, at least one
- * thread will notice that and schedule the recovery.
- */
- smp_mb__after_atomic();
+ if (!xe_sriov_vf_migration_supported(xe)) {
+ xe_gt_sriov_err(gt, "migration not supported\n");
+ return;
+ }
xe_gt_sriov_info(gt, "ready for recovery after migration\n");
- xe_sriov_vf_start_migration_recovery(xe);
+ vf_start_migration_recovery(gt);
}
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
@@ -1040,22 +1026,25 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
struct xe_device *xe = gt_to_xe(gt);
+ u64 lmem_size;
char buf[10];
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
- config->ggtt_base,
- config->ggtt_base + config->ggtt_size - 1);
-
- string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
- drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
+ if (xe_gt_is_main_type(gt)) {
+ u64 ggtt_size = xe_tile_sriov_vf_ggtt(gt_to_tile(gt));
+ u64 ggtt_base = xe_tile_sriov_vf_ggtt_base(gt_to_tile(gt));
- drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
+ drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
+ ggtt_base, ggtt_base + ggtt_size - 1);
+ string_get_size(ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "GGTT size:\t%llu (%s)\n", ggtt_size, buf);
- if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
- string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
- drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
+ if (IS_DGFX(xe)) {
+ lmem_size = xe_tile_sriov_vf_lmem(gt_to_tile(gt));
+ string_get_size(lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "LMEM size:\t%llu (%s)\n", lmem_size, buf);
+ }
}
drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
@@ -1118,3 +1107,276 @@ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "\thandshake:\t%u.%u\n",
pf_version->major, pf_version->minor);
}
+
+static bool vf_post_migration_shutdown(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ /*
+ * On platforms where CCS must be restored by the primary GT, the media
+ * GT's VF post-migration recovery must run afterward. Detect this case
+ * and re-queue the media GT's restore work item if necessary.
+ */
+ if (xe->info.needs_shared_vf_gt_wq && xe_gt_is_media_type(gt)) {
+ struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
+
+ if (xe_gt_sriov_vf_recovery_pending(primary_gt))
+ return true;
+ }
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ gt->sriov.vf.migration.recovery_queued = false;
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ xe_guc_ct_flush_and_stop(&gt->uc.guc.ct);
+ xe_guc_submit_pause(&gt->uc.guc);
+ xe_tlb_inval_reset(&gt->tlb_inval);
+
+ return false;
+}
+
+static size_t post_migration_scratch_size(struct xe_device *xe)
+{
+ return max(xe_lrc_reg_size(xe), LRC_WA_BB_SIZE);
+}
+
+static int vf_post_migration_fixups(struct xe_gt *gt)
+{
+ void *buf = gt->sriov.vf.migration.scratch;
+ int err;
+
+ /* xe_gt_sriov_vf_query_config will fixup the GGTT addresses */
+ err = xe_gt_sriov_vf_query_config(gt);
+ if (err)
+ return err;
+
+ if (xe_gt_is_main_type(gt))
+ xe_sriov_vf_ccs_rebase(gt_to_xe(gt));
+
+ xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
+ err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void vf_post_migration_rearm(struct xe_gt *gt)
+{
+ xe_guc_ct_restart(&gt->uc.guc.ct);
+ xe_guc_submit_unpause_prepare(&gt->uc.guc);
+}
+
+static void vf_post_migration_kickstart(struct xe_gt *gt)
+{
+ xe_guc_submit_unpause(&gt->uc.guc);
+}
+
+static void vf_post_migration_abort(struct xe_gt *gt)
+{
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false);
+ WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false);
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ wake_up_all(&gt->sriov.vf.migration.wq);
+
+ xe_guc_submit_pause_abort(&gt->uc.guc);
+}
+
+static int vf_post_migration_notify_resfix_done(struct xe_gt *gt)
+{
+ bool skip_resfix = false;
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ if (gt->sriov.vf.migration.recovery_queued) {
+ skip_resfix = true;
+ xe_gt_sriov_dbg(gt, "another recovery imminent, resfix skipped\n");
+ } else {
+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false);
+ }
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ if (skip_resfix)
+ return -EAGAIN;
+
+ /*
+ * Make sure interrupts on the new HW are properly set. The GuC IRQ
+ * must be working at this point, since the recovery has started,
+ * but the rest was not enabled using the procedure from spec.
+ */
+ xe_irq_resume(gt_to_xe(gt));
+
+ return vf_notify_resfix_done(gt);
+}
+
+static void vf_post_migration_recovery(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+ bool retry;
+
+ xe_gt_sriov_dbg(gt, "migration recovery in progress\n");
+
+ xe_pm_runtime_get(xe);
+ retry = vf_post_migration_shutdown(gt);
+ if (retry)
+ goto queue;
+
+ if (!xe_sriov_vf_migration_supported(xe)) {
+ xe_gt_sriov_err(gt, "migration is not supported\n");
+ err = -ENOTRECOVERABLE;
+ goto fail;
+ }
+
+ err = vf_post_migration_fixups(gt);
+ if (err)
+ goto fail;
+
+ vf_post_migration_rearm(gt);
+
+ err = vf_post_migration_notify_resfix_done(gt);
+ if (err && err != -EAGAIN)
+ goto fail;
+
+ vf_post_migration_kickstart(gt);
+
+ xe_pm_runtime_put(xe);
+ xe_gt_sriov_notice(gt, "migration recovery ended\n");
+ return;
+fail:
+ vf_post_migration_abort(gt);
+ xe_pm_runtime_put(xe);
+ xe_gt_sriov_err(gt, "migration recovery failed (%pe)\n", ERR_PTR(err));
+ xe_device_declare_wedged(xe);
+ return;
+
+queue:
+ xe_gt_sriov_info(gt, "Re-queuing migration recovery\n");
+ queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
+ xe_pm_runtime_put(xe);
+}
+
+static void migration_worker_func(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, struct xe_gt,
+ sriov.vf.migration.worker);
+
+ vf_post_migration_recovery(gt);
+}
+
+static void vf_migration_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ gt->sriov.vf.migration.recovery_teardown = true;
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ cancel_work_sync(&gt->sriov.vf.migration.worker);
+}
+
+/**
+ * xe_gt_sriov_vf_init_early() - GT VF init early
+ * @gt: the &xe_gt
+ *
+ * Return: 0 on success, errno on failure
+ */
+int xe_gt_sriov_vf_init_early(struct xe_gt *gt)
+{
+ void *buf;
+
+ if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return 0;
+
+ buf = drmm_kmalloc(&gt_to_xe(gt)->drm,
+ post_migration_scratch_size(gt_to_xe(gt)),
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ gt->sriov.vf.migration.scratch = buf;
+ spin_lock_init(&gt->sriov.vf.migration.lock);
+ INIT_WORK(&gt->sriov.vf.migration.worker, migration_worker_func);
+ init_waitqueue_head(&gt->sriov.vf.migration.wq);
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_vf_init() - GT VF init
+ * @gt: the &xe_gt
+ *
+ * Return: 0 on success, errno on failure
+ */
+int xe_gt_sriov_vf_init(struct xe_gt *gt)
+{
+ if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return 0;
+
+ /*
+ * We want to tear down the VF post-migration recovery early during driver
+ * unload; therefore, we add this finalization action late during driver
+ * load.
+ */
+ return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev,
+ vf_migration_fini, gt);
+}
+
+/**
+ * xe_gt_sriov_vf_recovery_pending() - VF post migration recovery pending
+ * @gt: the &xe_gt
+ *
+ * The return value of this function must be immediately visible upon vCPU
+ * unhalt and must persist until RESFIX_DONE is issued. This guarantee is
+ * currently implemented only for platforms that support memirq. If non-memirq
+ * platforms begin to support VF migration, this function will need to be
+ * updated accordingly.
+ *
+ * Return: True if VF post migration recovery is pending, False otherwise
+ */
+bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt)
+{
+ struct xe_memirq *memirq = &gt_to_tile(gt)->memirq;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ /* early detection until recovery starts */
+ if (xe_device_uses_memirq(gt_to_xe(gt)) &&
+ xe_memirq_guc_sw_int_0_irq_pending(memirq, &gt->uc.guc))
+ return true;
+
+ return READ_ONCE(gt->sriov.vf.migration.recovery_inprogress);
+}
+
+static bool vf_valid_ggtt(struct xe_gt *gt)
+{
+ struct xe_memirq *memirq = &gt_to_tile(gt)->memirq;
+ bool irq_pending = xe_device_uses_memirq(gt_to_xe(gt)) &&
+ xe_memirq_guc_sw_int_0_irq_pending(memirq, &gt->uc.guc);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ if (irq_pending || READ_ONCE(gt->sriov.vf.migration.ggtt_need_fixes))
+ return false;
+
+ return true;
+}
+
+/**
+ * xe_gt_sriov_vf_wait_valid_ggtt() - VF wait for valid GGTT addresses
+ * @gt: the &xe_gt
+ */
+void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt)
+{
+ int ret;
+
+ if (!IS_SRIOV_VF(gt_to_xe(gt)) ||
+ !xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return;
+
+ ret = wait_event_interruptible_timeout(gt->sriov.vf.migration.wq,
+ vf_valid_ggtt(gt),
+ HZ * 5);
+ xe_gt_WARN_ON(gt, !ret);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index 0af1dc769fe0..af40276790fa 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -21,16 +21,15 @@ void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
-void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt);
-int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
+int xe_gt_sriov_vf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_vf_init(struct xe_gt *gt);
+bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt);
+
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
-u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt);
-u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt);
-s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
@@ -39,4 +38,6 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index 298dedf4b009..420b0e6089de 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -7,20 +7,14 @@
#define _XE_GT_SRIOV_VF_TYPES_H_
#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
#include "xe_uc_fw_types.h"
/**
* struct xe_gt_sriov_vf_selfconfig - VF configuration data.
*/
struct xe_gt_sriov_vf_selfconfig {
- /** @ggtt_base: assigned base offset of the GGTT region. */
- u64 ggtt_base;
- /** @ggtt_size: assigned size of the GGTT region. */
- u64 ggtt_size;
- /** @ggtt_shift: difference in ggtt_base on last migration */
- s64 ggtt_shift;
- /** @lmem_size: assigned size of the LMEM. */
- u64 lmem_size;
/** @num_ctxs: assigned number of GuC submission context IDs. */
u16 num_ctxs;
/** @num_dbs: assigned number of GuC doorbells IDs. */
@@ -47,6 +41,28 @@ struct xe_gt_sriov_vf_runtime {
};
/**
+ * struct xe_gt_sriov_vf_migration - VF migration data.
+ */
+struct xe_gt_sriov_vf_migration {
+ /** @worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @lock: Protects recovery_queued and recovery_teardown */
+ spinlock_t lock;
+ /** @wq: wait queue for migration fixes */
+ wait_queue_head_t wq;
+ /** @scratch: Scratch memory for VF recovery */
+ void *scratch;
+ /** @recovery_teardown: VF post migration recovery is being torn down */
+ bool recovery_teardown;
+ /** @recovery_queued: VF post migration recovery is queued */
+ bool recovery_queued;
+ /** @recovery_inprogress: VF post migration recovery in progress */
+ bool recovery_inprogress;
+ /** @ggtt_need_fixes: VF GGTT needs fixes */
+ bool ggtt_need_fixes;
+};
+
+/**
* struct xe_gt_sriov_vf - GT level VF virtualization data.
*/
struct xe_gt_sriov_vf {
@@ -58,6 +74,8 @@ struct xe_gt_sriov_vf {
struct xe_gt_sriov_vf_selfconfig self_config;
/** @runtime: runtime data retrieved from the PF. */
struct xe_gt_sriov_vf_runtime runtime;
+ /** @migration: migration data for the VF. */
+ struct xe_gt_sriov_vf_migration migration;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 4e61c5e39bcb..1e0516ba7422 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -148,7 +148,11 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
if (!xe_gt_topology_report_l3(gt))
return;
- if (GRAPHICS_VER(xe) >= 30) {
+ if (GRAPHICS_VER(xe) >= 35) {
+ u32 fuse_val = xe_mmio_read32(mmio, MIRROR_L3BANK_ENABLE);
+
+ bitmap_from_arr32(l3_bank_mask, &fuse_val, 32);
+ } else if (GRAPHICS_VER(xe) >= 30) {
xe_l3_bank_mask_t per_node = {};
u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
u32 mirror_l3bank_enable = xe_mmio_read32(mmio, MIRROR_L3BANK_ENABLE);
@@ -269,8 +273,14 @@ static const char *eu_type_to_str(enum xe_gt_eu_type eu_type)
return NULL;
}
-void
-xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_gt_topology_dump() - Dump GT topology into a drm printer.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Return: always 0.
+ */
+int xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
{
drm_printf(p, "dss mask (geometry): %*pb\n", XE_MAX_DSS_FUSE_BITS,
gt->fuse_topo.g_dss_mask);
@@ -285,6 +295,7 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
if (xe_gt_topology_report_l3(gt))
drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
gt->fuse_topo.l3_bank_mask);
+ return 0;
}
/*
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index 5e62f5949b7b..3ff40f44bf2a 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -23,7 +23,7 @@ struct drm_printer;
void xe_gt_topology_init(struct xe_gt *gt);
-void xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p);
/**
* xe_gt_topology_mask_last_dss() - Returns the index of the last DSS in a mask.
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 66158105aca5..d93faa1eedef 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -73,6 +73,21 @@ enum xe_steering_type {
SQIDI_PSMI,
/*
+ * The bspec lists multiple ranges as "PSMI," but the different
+ * ranges with that label have different grpid steering values so we
+ * treat them independently in code. Note that the ranges with grpid=0
+ * are included in the INSTANCE0 group above.
+ */
+ PSMI19,
+
+ /*
+ * Although most GAM ranges must be steered to (0,0) and thus use the
+ * INSTANCE0 type farther down, some platforms have special rules
+ * for specific subtypes that require steering to (1,0) instead.
+ */
+ GAM1,
+
+ /*
* On some platforms there are multiple types of MCR registers that
* will always return a non-terminated value at instance (0, 0). We'll
* lump those all into a single category to keep things simple.
@@ -202,14 +217,14 @@ struct xe_gt {
/**
* @usm.bb_pool: Pool from which batchbuffers, for USM operations
* (e.g. migrations, fixing page tables), are allocated.
- * Dedicated pool needed so USM operations to not get blocked
+ * Dedicated pool needed so USM operations do not get blocked
* behind any user operations which may have resulted in a
* fault.
*/
struct xe_sa_manager *bb_pool;
/**
* @usm.reserved_bcs_instance: reserved BCS instance used for USM
- * operations (e.g. mmigrations, fixing page tables)
+ * operations (e.g. migrations, fixing page tables)
*/
u16 reserved_bcs_instance;
/** @usm.pf_wq: page fault work queue, unbound, high priority */
@@ -220,8 +235,8 @@ struct xe_gt {
* @usm.pf_queue: Page fault queue used to sync faults so faults can
* be processed not under the GuC CT lock. The queue is sized so
* it can sync all possible faults (1 per physical engine).
- * Multiple queues exists for page faults from different VMs are
- * be processed in parallel.
+ * Multiple queues exist for page faults from different VMs to be
+ * processed in parallel.
*/
struct pf_queue {
/** @usm.pf_queue.gt: back pointer to GT */
@@ -387,7 +402,7 @@ struct xe_gt {
/**
* @wa_active.oob_initialized: mark oob as initialized to help
* detecting misuse of XE_GT_WA() - it can only be called on
- * initialization after OOB WAs have being processed
+ * initialization after OOB WAs have been processed
*/
bool oob_initialized;
} wa_active;
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 00789844ea4d..d94490979adc 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -5,6 +5,7 @@
#include "xe_guc.h"
+#include <linux/iopoll.h>
#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>
@@ -971,20 +972,93 @@ static int guc_xfer_rsa(struct xe_guc *guc)
}
/*
- * Check a previously read GuC status register (GUC_STATUS) looking for
- * known terminal states (either completion or failure) of either the
- * microkernel status field or the boot ROM status field. Returns +1 for
- * successful completion, -1 for failure and 0 for any intermediate state.
+ * Wait for the GuC to start up.
+ *
+ * Measurements indicate this should take no more than 20ms (assuming the GT
+ * clock is at maximum frequency). However, thermal throttling and other issues
+ * can prevent the clock from hitting max and thus make the load take
+ * significantly longer. Allow up to 3s as a safety margin in normal builds. For
+ * CONFIG_DRM_XE_DEBUG allow up to 20s to account for slower execution, issues
+ * in PCODE, driver, fan, etc.
+ *
+ * Keep checking GUC_STATUS every 10ms, emitting a debug message every 100
+ * attempts as an "I'm slow, but alive" heartbeat. Regardless, if the load takes
+ * more than 200ms, emit a warning.
*/
-static int guc_load_done(u32 status)
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define GUC_LOAD_TIMEOUT_SEC 20
+#else
+#define GUC_LOAD_TIMEOUT_SEC 3
+#endif
+#define GUC_LOAD_TIME_WARN_MSEC 200
+
+static void print_load_status_err(struct xe_gt *gt, u32 status)
{
- u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, status);
- u32 br_val = REG_FIELD_GET(GS_BOOTROM_MASK, status);
+ struct xe_mmio *mmio = &gt->mmio;
+ u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
+ u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);
+
+ xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
+ REG_FIELD_GET(GS_MIA_IN_RESET, status),
+ bootrom, ukernel,
+ REG_FIELD_GET(GS_MIA_MASK, status),
+ REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
- switch (uk_val) {
+ switch (bootrom) {
+ case XE_BOOTROM_STATUS_NO_KEY_FOUND:
+ xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
+ xe_mmio_read32(mmio, GUC_HEADER_INFO));
+ break;
+ case XE_BOOTROM_STATUS_RSA_FAILED:
+ xe_gt_err(gt, "firmware signature verification failed\n");
+ break;
+ case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
+ xe_gt_err(gt, "firmware production part check failure\n");
+ break;
+ }
+
+ switch (ukernel) {
+ case XE_GUC_LOAD_STATUS_HWCONFIG_START:
+ xe_gt_err(gt, "still extracting hwconfig table.\n");
+ break;
+ case XE_GUC_LOAD_STATUS_EXCEPTION:
+ xe_gt_err(gt, "firmware exception. EIP: %#x\n",
+ xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
+ break;
+ case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
+ xe_gt_err(gt, "illegal init/ADS data\n");
+ break;
+ case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ xe_gt_err(gt, "illegal register in save/restore workaround list\n");
+ break;
+ case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ xe_gt_err(gt, "illegal workaround KLV data\n");
+ break;
+ case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
+ xe_gt_err(gt, "illegal feature flag specified\n");
+ break;
+ }
+}
+
+/*
+ * Check GUC_STATUS looking for known terminal states (either completion or
+ * failure) of either the microkernel status field or the boot ROM status field.
+ *
+ * Returns 1 for successful completion, -1 for failure and 0 for any
+ * intermediate state.
+ */
+static int guc_load_done(struct xe_gt *gt, u32 *status, u32 *tries)
+{
+ u32 ukernel, bootrom;
+
+ *status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
+ ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, *status);
+ bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, *status);
+
+ switch (ukernel) {
case XE_GUC_LOAD_STATUS_READY:
return 1;
-
case XE_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH:
case XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
case XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
@@ -1000,7 +1074,7 @@ static int guc_load_done(u32 status)
return -1;
}
- switch (br_val) {
+ switch (bootrom) {
case XE_BOOTROM_STATUS_NO_KEY_FOUND:
case XE_BOOTROM_STATUS_RSA_FAILED:
case XE_BOOTROM_STATUS_PAVPC_FAILED:
@@ -1014,165 +1088,58 @@ static int guc_load_done(u32 status)
return -1;
}
- return 0;
-}
+ if (++*tries >= 100) {
+ struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
-static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
-{
- u32 freq;
- int ret = xe_guc_pc_get_cur_freq(guc_pc, &freq);
+ *tries = 0;
+ xe_gt_dbg(gt, "GuC load still in progress, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n",
+ xe_guc_pc_get_act_freq(guc_pc),
+ xe_guc_pc_get_cur_freq_fw(guc_pc),
+ *status, ukernel, bootrom);
+ }
- return ret ? ret : freq;
+ return 0;
}
-/*
- * Wait for the GuC to start up.
- *
- * Measurements indicate this should take no more than 20ms (assuming the GT
- * clock is at maximum frequency). However, thermal throttling and other issues
- * can prevent the clock hitting max and thus making the load take significantly
- * longer. Allow up to 200ms as a safety margin for real world worst case situations.
- *
- * However, bugs anywhere from KMD to GuC to PCODE to fan failure in a CI farm can
- * lead to even longer times. E.g. if the GT is clamped to minimum frequency then
- * the load times can be in the seconds range. So the timeout is increased for debug
- * builds to ensure that problems can be correctly analysed. For release builds, the
- * timeout is kept short so that users don't wait forever to find out that there is a
- * problem. In either case, if the load took longer than is reasonable even with some
- * 'sensible' throttling, then flag a warning because something is not right.
- *
- * Note that there is a limit on how long an individual usleep_range() can wait for,
- * hence longer waits require wrapping a shorter wait in a loop.
- *
- * Note that the only reason an end user should hit the shorter timeout is in case of
- * extreme thermal throttling. And a system that is that hot during boot is probably
- * dead anyway!
- */
-#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
-#define GUC_LOAD_RETRY_LIMIT 20
-#else
-#define GUC_LOAD_RETRY_LIMIT 3
-#endif
-#define GUC_LOAD_TIME_WARN_MS 200
-
static int guc_wait_ucode(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_mmio *mmio = &gt->mmio;
struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
- ktime_t before, after, delta;
- int load_done;
- u32 status = 0;
- int count = 0;
+ u32 before_freq, act_freq, cur_freq;
+ u32 status = 0, tries = 0;
+ ktime_t before;
u64 delta_ms;
- u32 before_freq;
+ int ret;
before_freq = xe_guc_pc_get_act_freq(guc_pc);
before = ktime_get();
- /*
- * Note, can't use any kind of timing information from the call to xe_mmio_wait.
- * It could return a thousand intermediate stages at random times. Instead, must
- * manually track the total time taken and locally implement the timeout.
- */
- do {
- u32 last_status = status & (GS_UKERNEL_MASK | GS_BOOTROM_MASK);
- int ret;
-
- /*
- * Wait for any change (intermediate or terminal) in the status register.
- * Note, the return value is a don't care. The only failure code is timeout
- * but the timeouts need to be accumulated over all the intermediate partial
- * timeouts rather than allowing a huge timeout each time. So basically, need
- * to treat a timeout no different to a value change.
- */
- ret = xe_mmio_wait32_not(mmio, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK,
- last_status, 1000 * 1000, &status, false);
- if (ret < 0)
- count++;
- after = ktime_get();
- delta = ktime_sub(after, before);
- delta_ms = ktime_to_ms(delta);
-
- load_done = guc_load_done(status);
- if (load_done != 0)
- break;
- if (delta_ms >= (GUC_LOAD_RETRY_LIMIT * 1000))
- break;
+ ret = poll_timeout_us(ret = guc_load_done(gt, &status, &tries), ret,
+ 10 * USEC_PER_MSEC,
+ GUC_LOAD_TIMEOUT_SEC * USEC_PER_SEC, false);
- xe_gt_dbg(gt, "load still in progress, timeouts = %d, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n",
- count, xe_guc_pc_get_act_freq(guc_pc),
- guc_pc_get_cur_freq(guc_pc), status,
- REG_FIELD_GET(GS_BOOTROM_MASK, status),
- REG_FIELD_GET(GS_UKERNEL_MASK, status));
- } while (1);
+ delta_ms = ktime_to_ms(ktime_sub(ktime_get(), before));
+ act_freq = xe_guc_pc_get_act_freq(guc_pc);
+ cur_freq = xe_guc_pc_get_cur_freq_fw(guc_pc);
- if (load_done != 1) {
- u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
- u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);
-
- xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz), done = %d\n",
+ if (ret) {
+ xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz)\n",
status, delta_ms, xe_guc_pc_get_act_freq(guc_pc),
- guc_pc_get_cur_freq(guc_pc), load_done);
- xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
- REG_FIELD_GET(GS_MIA_IN_RESET, status),
- bootrom, ukernel,
- REG_FIELD_GET(GS_MIA_MASK, status),
- REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
-
- switch (bootrom) {
- case XE_BOOTROM_STATUS_NO_KEY_FOUND:
- xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
- xe_mmio_read32(mmio, GUC_HEADER_INFO));
- break;
-
- case XE_BOOTROM_STATUS_RSA_FAILED:
- xe_gt_err(gt, "firmware signature verification failed\n");
- break;
-
- case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
- xe_gt_err(gt, "firmware production part check failure\n");
- break;
- }
-
- switch (ukernel) {
- case XE_GUC_LOAD_STATUS_HWCONFIG_START:
- xe_gt_err(gt, "still extracting hwconfig table.\n");
- break;
-
- case XE_GUC_LOAD_STATUS_EXCEPTION:
- xe_gt_err(gt, "firmware exception. EIP: %#x\n",
- xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
- break;
-
- case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
- xe_gt_err(gt, "illegal init/ADS data\n");
- break;
-
- case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
- xe_gt_err(gt, "illegal register in save/restore workaround list\n");
- break;
-
- case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
- xe_gt_err(gt, "illegal workaround KLV data\n");
- break;
-
- case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
- xe_gt_err(gt, "illegal feature flag specified\n");
- break;
- }
+ xe_guc_pc_get_cur_freq_fw(guc_pc));
+ print_load_status_err(gt, status);
return -EPROTO;
- } else if (delta_ms > GUC_LOAD_TIME_WARN_MS) {
- xe_gt_warn(gt, "excessive init time: %lldms! [status = 0x%08X, timeouts = %d]\n",
- delta_ms, status, count);
- xe_gt_warn(gt, "excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc),
- before_freq, xe_gt_throttle_get_limit_reasons(gt));
+ }
+
+ if (delta_ms > GUC_LOAD_TIME_WARN_MSEC) {
+ xe_gt_warn(gt, "GuC load: excessive init time: %lldms! [status = 0x%08X]\n",
+ delta_ms, status);
+ xe_gt_warn(gt, "GuC load: excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n",
+ act_freq, cur_freq, before_freq,
+ xe_gt_throttle_get_limit_reasons(gt));
} else {
- xe_gt_dbg(gt, "init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X, timeouts = %d\n",
- delta_ms, xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc),
- before_freq, status, count);
+ xe_gt_dbg(gt, "GuC load: init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X\n",
+ delta_ms, act_freq, cur_freq, before_freq, status);
}
return 0;
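The rewrite above drops the hand-rolled wait loop in favour of a poll helper from <linux/iopoll.h>. As a point of reference only, here is a minimal sketch of the same poll-with-timeout idiom using the long-standing read_poll_timeout() helper from that header; hw_status_read() and HW_READY are hypothetical stand-ins, and the 10ms/3s values merely mirror the sleep interval and release-build timeout chosen above:

	#include <linux/bits.h>
	#include <linux/errno.h>
	#include <linux/iopoll.h>

	#define HW_READY	BIT(0)

	/* hypothetical stand-in for a status register read */
	static u32 hw_status_read(void);

	static int wait_hw_ready(void)
	{
		u32 status;
		int ret;

		/*
		 * Re-evaluate hw_status_read() every 10ms and give up after 3s:
		 * returns 0 once the condition holds, -ETIMEDOUT otherwise.
		 */
		ret = read_poll_timeout(hw_status_read, status, status & HW_READY,
					10 * USEC_PER_MSEC, 3 * USEC_PER_SEC, false);
		if (ret)
			return ret;

		return 0;
	}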
@@ -1472,7 +1439,7 @@ timeout:
BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);
ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
- 1000000, &header, false);
+ 2000000, &header, false);
if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
GUC_HXG_ORIGIN_GUC))
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 58e0b0294a5b..22ac2a8b74c8 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -18,6 +18,7 @@
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
+#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
@@ -30,7 +31,6 @@
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
-#include "xe_gt_mcr.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index 243dad3e2418..0c1fbe97b8bf 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -122,6 +122,7 @@ struct __guc_capture_parsed_output {
{ RING_IPEHR(0), REG_32BIT, 0, 0, 0, "IPEHR"}, \
{ RING_INSTDONE(0), REG_32BIT, 0, 0, 0, "RING_INSTDONE"}, \
{ INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, 0, "INDIRECT_RING_STATE"}, \
+ { RING_CURRENT_LRCA(0), REG_32BIT, 0, 0, 0, "CURRENT_LRCA"}, \
{ RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
{ RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "ACTHD"}, \
{ RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
@@ -149,6 +150,9 @@ struct __guc_capture_parsed_output {
{ SFC_DONE(2), 0, 0, 0, 0, "SFC_DONE[2]"}, \
{ SFC_DONE(3), 0, 0, 0, 0, "SFC_DONE[3]"}
+#define XE3P_BASE_ENGINE_INSTANCE \
+ { RING_CSMQDEBUG(0), REG_32BIT, 0, 0, 0, "CSMQDEBUG"}
+
/* XE_LP Global */
static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
COMMON_XELP_BASE_GLOBAL,
@@ -195,6 +199,12 @@ static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
+/* Render / Compute Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe3p_rc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+ XE3P_BASE_ENGINE_INSTANCE,
+};
+
/*
* Empty list to prevent warnings about unknown class/instance types
* as not all class/instance types have entries on all platforms.
@@ -245,6 +255,21 @@ static const struct __guc_mmio_reg_descr_group xe_hpg_lists[] = {
{}
};
+/* List of lists for Xe3p and beyond */
+static const struct __guc_mmio_reg_descr_group xe3p_lists[] = {
+ MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_hpg_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(xe3p_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ {}
+};
static const char * const capture_list_type_names[] = {
"Global",
"Class",
@@ -292,7 +317,9 @@ guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc,
static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct xe_device *xe)
{
- if (GRAPHICS_VERx100(xe) >= 1255)
+ if (GRAPHICS_VER(xe) >= 35)
+ return xe3p_lists;
+ else if (GRAPHICS_VERx100(xe) >= 1255)
return xe_hpg_lists;
else
return xe_lp_lists;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 18f6327bf552..e68953ef3a00 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -25,7 +25,6 @@
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
-#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
#include "xe_guc_relay.h"
@@ -33,6 +32,7 @@
#include "xe_guc_tlb_inval.h"
#include "xe_map.h"
#include "xe_pm.h"
+#include "xe_sriov_vf.h"
#include "xe_trace_guc.h"
static void receive_g2h(struct xe_guc_ct *ct);
@@ -93,8 +93,6 @@ struct g2h_fence {
bool done;
};
-#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
-
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
memset(g2h_fence, 0, sizeof(*g2h_fence));
@@ -169,6 +167,7 @@ ct_to_xe(struct xe_guc_ct *ct)
*/
#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
+#define CTB_H2G_BUFFER_OFFSET (CTB_DESC_SIZE * 2)
#define CTB_H2G_BUFFER_SIZE (SZ_4K)
#define CTB_G2H_BUFFER_SIZE (SZ_128K)
#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2)
@@ -192,7 +191,7 @@ long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
static size_t guc_ct_size(void)
{
- return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
+ return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE +
CTB_G2H_BUFFER_SIZE;
}
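For orientation, a sketch of the buffer layout implied by the defines above, assuming sizeof(struct guc_ct_buffer_desc) fits within SZ_2K so that CTB_DESC_SIZE == SZ_2K (derived from this hunk, not new code in the patch):

	/*
	 *   offset 0x00000: h2g descriptor    (CTB_DESC_SIZE       = 2K)
	 *   offset 0x00800: g2h descriptor    (CTB_DESC_SIZE       = 2K)
	 *   offset 0x01000: h2g command ring  (CTB_H2G_BUFFER_SIZE = 4K)  = CTB_H2G_BUFFER_OFFSET
	 *   offset 0x02000: g2h command ring  (CTB_G2H_BUFFER_SIZE = 128K)
	 *   offset 0x22000: end               (guc_ct_size())
	 */

Naming the 2 * CTB_DESC_SIZE term as CTB_H2G_BUFFER_OFFSET keeps the several places that compute these offsets in sync.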
@@ -333,7 +332,7 @@ static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
h2g->desc = *map;
xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
- h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
+ h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
}
static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
@@ -351,7 +350,7 @@ static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
- g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
+ g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
CTB_H2G_BUFFER_SIZE);
}
@@ -362,7 +361,7 @@ static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
int err;
desc_addr = xe_bo_ggtt_addr(ct->bo);
- ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET;
size = ct->ctbs.h2g.info.size * sizeof(u32);
err = xe_guc_self_cfg64(guc,
@@ -389,7 +388,7 @@ static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
int err;
desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
- ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET +
CTB_H2G_BUFFER_SIZE;
size = ct->ctbs.g2h.info.size * sizeof(u32);
@@ -503,7 +502,7 @@ static void ct_exit_safe_mode(struct xe_guc_ct *ct)
xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
}
-int xe_guc_ct_enable(struct xe_guc_ct *ct)
+static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
@@ -511,21 +510,29 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
- xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
- guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
- guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
+ if (needs_register) {
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
+ guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
+ guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
- err = guc_ct_ctb_h2g_register(ct);
- if (err)
- goto err_out;
+ err = guc_ct_ctb_h2g_register(ct);
+ if (err)
+ goto err_out;
- err = guc_ct_ctb_g2h_register(ct);
- if (err)
- goto err_out;
+ err = guc_ct_ctb_g2h_register(ct);
+ if (err)
+ goto err_out;
- err = guc_ct_control_toggle(ct, true);
- if (err)
- goto err_out;
+ err = guc_ct_control_toggle(ct, true);
+ if (err)
+ goto err_out;
+ } else {
+ ct->ctbs.h2g.info.broken = false;
+ ct->ctbs.g2h.info.broken = false;
+ /* Skip everything in H2G buffer */
+ xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
+ CTB_H2G_BUFFER_SIZE);
+ }
guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
@@ -557,6 +564,32 @@ err_out:
return err;
}
+/**
+ * xe_guc_ct_restart() - Restart GuC CT
+ * @ct: the &xe_guc_ct
+ *
+ * Restart GuC CT to an empty state without issuing a CT register MMIO command.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int xe_guc_ct_restart(struct xe_guc_ct *ct)
+{
+ return __xe_guc_ct_start(ct, false);
+}
+
+/**
+ * xe_guc_ct_enable() - Enable GuC CT
+ * @ct: the &xe_guc_ct
+ *
+ * Enable GuC CT to an empty state and issue a CT register MMIO command.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int xe_guc_ct_enable(struct xe_guc_ct *ct)
+{
+ return __xe_guc_ct_start(ct, true);
+}
+
static void stop_g2h_handler(struct xe_guc_ct *ct)
{
cancel_work_sync(&ct->g2h_worker);
@@ -577,6 +610,16 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
}
/**
+ * xe_guc_ct_flush_and_stop - Flush and stop all processing of G2H / H2G
+ * @ct: the &xe_guc_ct
+ */
+void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct)
+{
+ receive_g2h(ct);
+ xe_guc_ct_stop(ct);
+}
+
+/**
* xe_guc_ct_stop - Set GuC to stopped state
* @ct: the &xe_guc_ct
*
@@ -739,6 +782,28 @@ static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
return seqno;
}
+#define MAKE_ACTION(type, __action) \
+({ \
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) | \
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | \
+ GUC_HXG_EVENT_MSG_0_DATA0, __action); \
+})
+
+static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action)
+{
+ /*
+ * When resuming a VF, we can't reliably track whether context
+ * registration has completed in the GuC state machine. It is harmless
+ * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT
+ * is used. Additionally, if there is an H2G protocol issue on a VF,
+ * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely
+ * fail.
+ */
+ return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) &&
+ (action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC ||
+ action == XE_GUC_ACTION_REGISTER_CONTEXT);
+}
+
#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
@@ -810,18 +875,14 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
if (want_response) {
- cmd[1] =
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
- FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
- GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]);
+ } else if (vf_action_can_safely_fail(xe, action[0])) {
+ cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]);
} else {
fast_req_track(ct, ct_fence_value,
FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
- cmd[1] =
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
- FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
- GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]);
}
/* H2G header in cmd[1] replaces action[0] so: */
@@ -854,7 +915,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
- struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
u16 seqno;
int ret;
@@ -875,7 +936,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
goto out;
}
- if (ct->state == XE_GUC_CT_STATE_STOPPED) {
+ if (ct->state == XE_GUC_CT_STATE_STOPPED || xe_gt_recovery_pending(gt)) {
ret = -ECANCELED;
goto out;
}
@@ -930,22 +991,15 @@ static void kick_reset(struct xe_guc_ct *ct)
static int dequeue_one_g2h(struct xe_guc_ct *ct);
-static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
- u32 g2h_len, u32 num_g2h,
- struct g2h_fence *g2h_fence)
+/*
+ * Wait before retrying to send an H2G message.
+ * Return: true if ready for retry, false if the wait timed out
+ */
+static bool guc_ct_send_wait_for_retry(struct xe_guc_ct *ct, u32 len,
+ u32 g2h_len, struct g2h_fence *g2h_fence,
+ unsigned int *sleep_period_ms)
{
struct xe_device *xe = ct_to_xe(ct);
- struct xe_gt *gt = ct_to_gt(ct);
- unsigned int sleep_period_ms = 1;
- int ret;
-
- xe_gt_assert(gt, !g2h_len || !g2h_fence);
- lockdep_assert_held(&ct->lock);
- xe_device_assert_mem_access(ct_to_xe(ct));
-
-try_again:
- ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
- g2h_fence);
/*
* We wait to try to restore credits for about 1 second before bailing.
@@ -954,24 +1008,22 @@ try_again:
* the case of G2H we process any G2H in the channel, hopefully freeing
* credits as we consume the G2H messages.
*/
- if (unlikely(ret == -EBUSY &&
- !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
+ if (!h2g_has_room(ct, len + GUC_CTB_HDR_LEN)) {
struct guc_ctb *h2g = &ct->ctbs.h2g;
- if (sleep_period_ms == 1024)
- goto broken;
+ if (*sleep_period_ms == 1024)
+ return false;
trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
h2g->info.size,
h2g->info.space,
len + GUC_CTB_HDR_LEN);
- msleep(sleep_period_ms);
- sleep_period_ms <<= 1;
-
- goto try_again;
- } else if (unlikely(ret == -EBUSY)) {
+ msleep(*sleep_period_ms);
+ *sleep_period_ms <<= 1;
+ } else {
struct xe_device *xe = ct_to_xe(ct);
struct guc_ctb *g2h = &ct->ctbs.g2h;
+ int ret;
trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
desc_read(xe, g2h, tail),
@@ -985,7 +1037,7 @@ try_again:
(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
g2h_avail(ct), HZ))
- goto broken;
+ return false;
#undef g2h_avail
ret = dequeue_one_g2h(ct);
@@ -993,9 +1045,32 @@ try_again:
if (ret != -ECANCELED)
xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
ERR_PTR(ret));
- goto broken;
+ return false;
}
+ }
+ return true;
+}
+
+static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h,
+ struct g2h_fence *g2h_fence)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ unsigned int sleep_period_ms = 1;
+ int ret;
+
+ xe_gt_assert(gt, !g2h_len || !g2h_fence);
+ lockdep_assert_held(&ct->lock);
+ xe_device_assert_mem_access(ct_to_xe(ct));
+try_again:
+ ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
+ g2h_fence);
+
+ if (unlikely(ret == -EBUSY)) {
+ if (!guc_ct_send_wait_for_retry(ct, len, g2h_len, g2h_fence,
+ &sleep_period_ms))
+ goto broken;
goto try_again;
}
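For reference, the retry helper above doubles the sleep period from 1 ms and bails out once it would reach 1024 ms, so a fully exhausted H2G retry sequence sleeps for at most 1 + 2 + 4 + ... + 512 = 1023 ms, i.e. roughly the one second mentioned in the comment, before the channel is declared broken.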
@@ -1337,6 +1412,10 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
fast_req_report(ct, fence);
+ /* FIXME: W/A race in the GuC, will get in firmware soon */
+ if (xe_gt_recovery_pending(gt))
+ return 0;
+
CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
@@ -1793,186 +1872,6 @@ static void g2h_worker_func(struct work_struct *w)
receive_g2h(ct);
}
-static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds,
- u32 size, u32 idx, s64 shift)
-{
- u32 hi, lo;
- u64 offset;
-
- lo = xe_map_rd_ring_u32(xe, cmds, idx, size);
- hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size);
- offset = make_u64(hi, lo);
- offset += shift;
- lo = lower_32_bits(offset);
- hi = upper_32_bits(offset);
- xe_map_wr_ring_u32(xe, cmds, idx, size, lo);
- xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi);
-}
-
-/*
- * Shift any GGTT addresses within a single message left within CTB from
- * before post-migration recovery.
- * @ct: pointer to CT struct of the target GuC
- * @cmds: iomap buffer containing CT messages
- * @head: start of the target message within the buffer
- * @len: length of the target message
- * @size: size of the commands buffer
- * @shift: the address shift to be added to each GGTT reference
- * Return: true if the message was fixed or needed no fixups, false on failure
- */
-static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct,
- struct iosys_map *cmds, u32 head,
- u32 len, u32 size, s64 shift)
-{
- struct xe_gt *gt = ct_to_gt(ct);
- struct xe_device *xe = ct_to_xe(ct);
- u32 msg[GUC_HXG_MSG_MIN_LEN];
- u32 action, i, n;
-
- xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN);
-
- msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size);
- action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
-
- xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action);
-
- switch (action) {
- case XE_GUC_ACTION_REGISTER_CONTEXT:
- if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN)
- goto err_len;
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
- shift);
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
- shift);
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
- break;
- case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
- if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN)
- goto err_len;
- n = xe_map_rd_ring_u32(xe, cmds, head +
- XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size);
- if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n)
- goto err_len;
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
- shift);
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
- shift);
- for (i = 0; i < n; i++)
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR
- + 2 * i, shift);
- break;
- default:
- break;
- }
- return true;
-
-err_len:
- xe_gt_err(gt, "Skipped G2G %#x message fixups, unexpected length (%u)\n", action, len);
- return false;
-}
-
-/*
- * Apply fixups to the next outgoing CT message within given CTB
- * @ct: the &xe_guc_ct struct instance representing the target GuC
- * @h2g: the &guc_ctb struct instance of the target buffer
- * @shift: shift to be added to all GGTT addresses within the CTB
- * @mhead: pointer to an integer storing message start position; the
- * position is changed to next message before this function return
- * @avail: size of the area available for parsing, that is length
- * of all remaining messages stored within the CTB
- * Return: size of the area available for parsing after one message
- * has been parsed, that is length remaining from the updated mhead
- */
-static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g,
- s64 shift, u32 *mhead, s32 avail)
-{
- struct xe_gt *gt = ct_to_gt(ct);
- struct xe_device *xe = ct_to_xe(ct);
- u32 msg[GUC_HXG_MSG_MIN_LEN];
- u32 size = h2g->info.size;
- u32 head = *mhead;
- u32 len;
-
- xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN);
-
- /* Read header */
- msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size);
- len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
-
- if (unlikely(len > (u32)avail)) {
- xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n",
- avail, len);
- return 0;
- }
-
- head = (head + GUC_CTB_MSG_MIN_LEN) % size;
- if (!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift))
- return 0;
- *mhead = (head + msg_len_to_hxg_len(len)) % size;
-
- return avail - len;
-}
-
-/**
- * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages
- * @ct: pointer to CT struct of the target GuC
- * @ggtt_shift: shift to be added to all GGTT addresses within the CTB
- *
- * Messages in GuC to Host CTB are owned by GuC and any fixups in them
- * are made by GuC. But content of the Host to GuC CTB is owned by the
- * KMD, so fixups to GGTT references in any pending messages need to be
- * applied here.
- * This function updates GGTT offsets in payloads of pending H2G CTB
- * messages (messages which were not consumed by GuC before the VF got
- * paused).
- */
-void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift)
-{
- struct guc_ctb *h2g = &ct->ctbs.h2g;
- struct xe_guc *guc = ct_to_guc(ct);
- struct xe_gt *gt = guc_to_gt(guc);
- u32 head, tail, size;
- s32 avail;
-
- if (unlikely(h2g->info.broken))
- return;
-
- h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
- head = h2g->info.head;
- tail = READ_ONCE(h2g->info.tail);
- size = h2g->info.size;
-
- if (unlikely(head > size))
- goto corrupted;
-
- if (unlikely(tail >= size))
- goto corrupted;
-
- avail = tail - head;
-
- /* beware of buffer wrap case */
- if (unlikely(avail < 0))
- avail += size;
- xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size);
- xe_gt_assert(gt, avail >= 0);
-
- while (avail > 0)
- avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail);
-
- return;
-
-corrupted:
- xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n",
- head, tail, size);
- h2g->info.broken = true;
-}
-
static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
bool want_ctb)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index cf41210ab30a..ca1ce2b3c354 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -15,8 +15,10 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct);
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
+int xe_guc_ct_restart(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
+void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct);
void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct);
@@ -24,8 +26,6 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
-void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift);
-
static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
{
return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
@@ -74,4 +74,13 @@ xe_guc_ct_send_block_no_fail(struct xe_guc_ct *ct, const u32 *action, u32 len)
long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct);
+/**
+ * xe_guc_ct_wake_waiters() - Wake up GuC CT waiters
+ * @ct: GuC CT object
+ */
+static inline void xe_guc_ct_wake_waiters(struct xe_guc_ct *ct)
+{
+ wake_up_all(&ct->wq);
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index c30c0e3ccbbb..a3b034e4b205 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -51,6 +51,21 @@ struct xe_guc_exec_queue {
wait_queue_head_t suspend_wait;
/** @suspend_pending: a suspend of the exec_queue is pending */
bool suspend_pending;
+ /**
+ * @needs_cleanup: Needs a cleanup message during VF post migration
+ * recovery.
+ */
+ bool needs_cleanup;
+ /**
+ * @needs_suspend: Needs a suspend message during VF post migration
+ * recovery.
+ */
+ bool needs_suspend;
+ /**
+ * @needs_resume: Needs a resume message during VF post migration
+ * recovery.
+ */
+ bool needs_resume;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 53fdf59524c4..ff22235857f8 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -7,12 +7,14 @@
#include <linux/cleanup.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/wait_bit.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
+#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>
#include "abi/guc_actions_slpc_abi.h"
@@ -130,26 +132,16 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
static int wait_for_pc_state(struct xe_guc_pc *pc,
- enum slpc_global_state state,
+ enum slpc_global_state target_state,
int timeout_ms)
{
- int timeout_us = 1000 * timeout_ms;
- int slept, wait = 10;
+ enum slpc_global_state state;
xe_device_assert_mem_access(pc_to_xe(pc));
- for (slept = 0; slept < timeout_us;) {
- if (slpc_shared_data_read(pc, header.global_state) == state)
- return 0;
-
- usleep_range(wait, wait << 1);
- slept += wait;
- wait <<= 1;
- if (slept + wait > timeout_us)
- wait = timeout_us - slept;
- }
-
- return -ETIMEDOUT;
+ return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
+ state == target_state,
+ 20, timeout_ms * USEC_PER_MSEC, false);
}
static int wait_for_flush_complete(struct xe_guc_pc *pc)
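wait_for_pc_state() above (and wait_for_act_freq_max_limit() below) now delegate the open-coded wait loop to the iopoll helper. A freestanding sketch of what such a poll-until-condition-or-timeout helper boils down to, with hypothetical names (this is not the kernel's poll_timeout_us implementation):

#include <errno.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the kernel's delay/time primitives. */
extern void sleep_us(unsigned int us);
extern unsigned long long clock_us(void);

/* Poll read(arg) until cond() holds or timeout_us elapses;
 * returns 0 on success, -ETIMEDOUT otherwise. */
static int poll_until(unsigned int (*read)(void *), bool (*cond)(unsigned int),
		      void *arg, unsigned int sleep_period_us,
		      unsigned long long timeout_us)
{
	unsigned long long deadline = clock_us() + timeout_us;

	for (;;) {
		if (cond(read(arg)))
			return 0;
		if (clock_us() >= deadline)
			return -ETIMEDOUT;
		sleep_us(sleep_period_us);
	}
}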
@@ -164,24 +156,15 @@ static int wait_for_flush_complete(struct xe_guc_pc *pc)
return 0;
}
-static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
+static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
{
- int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
- int slept, wait = 10;
-
- for (slept = 0; slept < timeout_us;) {
- if (xe_guc_pc_get_act_freq(pc) <= freq)
- return 0;
-
- usleep_range(wait, wait << 1);
- slept += wait;
- wait <<= 1;
- if (slept + wait > timeout_us)
- wait = timeout_us - slept;
- }
+ u32 freq;
- return -ETIMEDOUT;
+ return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
+ freq <= max_limit,
+ 20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
}
+
static int pc_action_reset(struct xe_guc_pc *pc)
{
struct xe_guc_ct *ct = pc_to_ct(pc);
@@ -904,7 +887,7 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
if (pc_get_min_freq(pc) > pc->rp0_freq)
ret = pc_set_min_freq(pc, pc->rp0_freq);
- if (XE_GT_WA(tile->primary_gt, 14022085890))
+ if (XE_DEVICE_WA(tile_to_xe(tile), 14022085890))
ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
out:
@@ -983,7 +966,7 @@ void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
* Wait for actual freq to go below the flush cap: even if the previous
* max was below cap, the current one might still be above it
*/
- ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
+ ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
if (ret)
xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
index e5dc94f3e618..0c0ff24ba62a 100644
--- a/drivers/gpu/drm/xe/xe_guc_relay.c
+++ b/drivers/gpu/drm/xe/xe_guc_relay.c
@@ -56,9 +56,19 @@ static struct xe_device *relay_to_xe(struct xe_guc_relay *relay)
return gt_to_xe(relay_to_gt(relay));
}
+#define XE_RELAY_DIAG_RATELIMIT_INTERVAL (10 * HZ)
+#define XE_RELAY_DIAG_RATELIMIT_BURST 10
+
+#define relay_ratelimit_printk(relay, _level, fmt...) ({ \
+ typeof(relay) _r = (relay); \
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) || \
+ ___ratelimit(&_r->diag_ratelimit, "xe_guc_relay")) \
+ xe_gt_sriov_##_level(relay_to_gt(_r), "relay: " fmt); \
+})
+
#define relay_assert(relay, condition) xe_gt_assert(relay_to_gt(relay), condition)
-#define relay_notice(relay, msg...) xe_gt_sriov_notice(relay_to_gt(relay), "relay: " msg)
-#define relay_debug(relay, msg...) xe_gt_sriov_dbg_verbose(relay_to_gt(relay), "relay: " msg)
+#define relay_notice(relay, msg...) relay_ratelimit_printk((relay), notice, msg)
+#define relay_debug(relay, msg...) relay_ratelimit_printk((relay), dbg_verbose, msg)
static int relay_get_totalvfs(struct xe_guc_relay *relay)
{
@@ -345,6 +355,9 @@ int xe_guc_relay_init(struct xe_guc_relay *relay)
INIT_WORK(&relay->worker, relays_worker_fn);
INIT_LIST_HEAD(&relay->pending_relays);
INIT_LIST_HEAD(&relay->incoming_actions);
+ ratelimit_state_init(&relay->diag_ratelimit,
+ XE_RELAY_DIAG_RATELIMIT_INTERVAL,
+ XE_RELAY_DIAG_RATELIMIT_BURST);
err = mempool_init_kmalloc_pool(&relay->pool, XE_RELAY_MEMPOOL_MIN_NUM +
relay_get_totalvfs(relay),
diff --git a/drivers/gpu/drm/xe/xe_guc_relay_types.h b/drivers/gpu/drm/xe/xe_guc_relay_types.h
index 5999fcb77e96..20eee10856b2 100644
--- a/drivers/gpu/drm/xe/xe_guc_relay_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_relay_types.h
@@ -7,6 +7,7 @@
#define _XE_GUC_RELAY_TYPES_H_
#include <linux/mempool.h>
+#include <linux/ratelimit_types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -31,6 +32,9 @@ struct xe_guc_relay {
/** @last_rid: last Relay-ID used while sending a message. */
u32 last_rid;
+
+ /** @diag_ratelimit: ratelimit state used to throttle diagnostics messages. */
+ struct ratelimit_state diag_ratelimit;
};
#endif
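The relay diagnostics throttle above follows the standard ratelimit_state pattern: allow a burst of messages per interval, then drop further ones unless the SR-IOV debug config forces them through. A minimal, self-contained sketch of that pattern (hypothetical driver code, not part of this patch):

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>

static struct ratelimit_state diag_rs;

static void diag_ratelimit_init(void)
{
	/* Allow up to 10 diagnostics every 10 seconds, then start dropping. */
	ratelimit_state_init(&diag_rs, 10 * HZ, 10);
}

static void diag_notice(const char *what)
{
	/* ___ratelimit() returns nonzero while the current burst allowance lasts. */
	if (___ratelimit(&diag_rs, "diag"))
		pr_notice("relay: %s\n", what);
}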
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 94ed8159496f..0ef67d3523a7 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -70,6 +70,8 @@ exec_queue_to_guc(struct xe_exec_queue *q)
#define EXEC_QUEUE_STATE_BANNED (1 << 9)
#define EXEC_QUEUE_STATE_CHECK_TIMEOUT (1 << 10)
#define EXEC_QUEUE_STATE_EXTRA_REF (1 << 11)
+#define EXEC_QUEUE_STATE_PENDING_RESUME (1 << 12)
+#define EXEC_QUEUE_STATE_PENDING_TDR_EXIT (1 << 13)
static bool exec_queue_registered(struct xe_exec_queue *q)
{
@@ -141,6 +143,11 @@ static void set_exec_queue_destroyed(struct xe_exec_queue *q)
atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
}
+static void clear_exec_queue_destroyed(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
+}
+
static bool exec_queue_banned(struct xe_exec_queue *q)
{
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
@@ -221,6 +228,41 @@ static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
}
+static void clear_exec_queue_extra_ref(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
+}
+
+static bool exec_queue_pending_resume(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME;
+}
+
+static void set_exec_queue_pending_resume(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
+}
+
+static void clear_exec_queue_pending_resume(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
+}
+
+static bool exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_TDR_EXIT;
+}
+
+static void set_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
+}
+
+static void clear_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
+}
+
static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
{
return (atomic_read(&q->guc->state) &
@@ -670,6 +712,11 @@ static u32 wq_space_until_wrap(struct xe_exec_queue *q)
return (WQ_SIZE - q->guc->wqi_tail);
}
+static bool vf_recovery(struct xe_guc *guc)
+{
+ return xe_gt_recovery_pending(guc_to_gt(guc));
+}
+
static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
{
struct xe_guc *guc = exec_queue_to_guc(q);
@@ -679,7 +726,7 @@ static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
#define AVAILABLE_SPACE \
CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
- if (wqi_size > AVAILABLE_SPACE) {
+ if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) {
try_again:
q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
if (wqi_size > AVAILABLE_SPACE) {
@@ -736,18 +783,12 @@ static void wq_item_append(struct xe_exec_queue *q)
if (wq_wait_for_space(q, wqi_size))
return;
- xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN);
wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
FIELD_PREP(WQ_LEN_MASK, len_dw);
- xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW);
wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
- xe_gt_assert(guc_to_gt(guc), i ==
- XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS);
wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
- xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID);
wqi[i++] = 0;
- xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL);
for (j = 1; j < q->width; ++j) {
struct xe_lrc *lrc = q->lrc[j];
@@ -768,52 +809,8 @@ static void wq_item_append(struct xe_exec_queue *q)
parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
}
-static int wq_items_rebase(struct xe_exec_queue *q)
-{
- struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
- int i = q->guc->wqi_head;
-
- /* the ring starts after a header struct */
- iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch, wq[0]));
-
- while ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) {
- u32 len_dw, type, val;
-
- if (drm_WARN_ON_ONCE(&xe->drm, i < 0 || i > 2 * WQ_SIZE))
- break;
-
- val = xe_map_rd_ring_u32(xe, &map, i / sizeof(u32) +
- XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN,
- WQ_SIZE / sizeof(u32));
- len_dw = FIELD_GET(WQ_LEN_MASK, val);
- type = FIELD_GET(WQ_TYPE_MASK, val);
-
- if (drm_WARN_ON_ONCE(&xe->drm, len_dw >= WQ_SIZE / sizeof(u32)))
- break;
-
- if (type == WQ_TYPE_MULTI_LRC) {
- val = xe_lrc_descriptor(q->lrc[0]);
- xe_map_wr_ring_u32(xe, &map, i / sizeof(u32) +
- XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW,
- WQ_SIZE / sizeof(u32), val);
- } else if (drm_WARN_ON_ONCE(&xe->drm, type != WQ_TYPE_NOOP)) {
- break;
- }
-
- i += (len_dw + 1) * sizeof(u32);
- }
-
- if ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) {
- xe_gt_err(q->gt, "Exec queue fixups incomplete - wqi parse failed\n");
- return -EBADMSG;
- }
- return 0;
-}
-
#define RESUME_PENDING ~0x0ull
-static void submit_exec_queue(struct xe_exec_queue *q)
+static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_lrc *lrc = q->lrc[0];
@@ -825,10 +822,13 @@ static void submit_exec_queue(struct xe_exec_queue *q)
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
- if (xe_exec_queue_is_parallel(q))
- wq_item_append(q);
- else
- xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+ if (!job->skip_emit || job->last_replay) {
+ if (xe_exec_queue_is_parallel(q))
+ wq_item_append(q);
+ else
+ xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+ job->last_replay = false;
+ }
if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
return;
@@ -870,54 +870,33 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
struct xe_sched_job *job = to_xe_sched_job(drm_job);
struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct dma_fence *fence = NULL;
- bool lr = xe_exec_queue_is_lr(q);
+ bool lr = xe_exec_queue_is_lr(q), killed_or_banned_or_wedged =
+ exec_queue_killed_or_banned_or_wedged(q);
xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
exec_queue_banned(q) || exec_queue_suspended(q));
trace_xe_sched_job_run(job);
- if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
+ if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
register_exec_queue(q, GUC_CONTEXT_NORMAL);
- if (!lr) /* LR jobs are emitted in the exec IOCTL */
+ if (!job->skip_emit)
q->ring_ops->emit_job(job);
- submit_exec_queue(q);
+ submit_exec_queue(q, job);
+ job->skip_emit = false;
}
- if (lr) {
- xe_sched_job_set_error(job, -EOPNOTSUPP);
- dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */
- } else {
- fence = job->fence;
- }
-
- return fence;
-}
-
-/**
- * xe_guc_jobs_ring_rebase - Re-emit ring commands of requests pending
- * on all queues under a guc.
- * @guc: the &xe_guc struct instance
- */
-void xe_guc_jobs_ring_rebase(struct xe_guc *guc)
-{
- struct xe_exec_queue *q;
- unsigned long index;
-
/*
- * This routine is used within VF migration recovery. This means
- * using the lock here introduces a restriction: we cannot wait
- * for any GFX HW response while the lock is taken.
+ * We don't care about job-fence ordering in LR VMs because these fences
+ * are never exported; they are used solely to keep jobs on the pending
+ * list. Once a queue enters an error state, there's no need to track
+ * them.
*/
- mutex_lock(&guc->submission_state.lock);
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
- if (exec_queue_killed_or_banned_or_wedged(q))
- continue;
- xe_exec_queue_jobs_ring_restore(q);
- }
- mutex_unlock(&guc->submission_state.lock);
+ if (killed_or_banned_or_wedged && lr)
+ xe_sched_job_set_error(job, -ECANCELED);
+
+ return job->fence;
}
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
@@ -951,15 +930,17 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
ret = wait_event_timeout(guc->ct.wq,
(!exec_queue_pending_enable(q) &&
!exec_queue_pending_disable(q)) ||
- xe_guc_read_stopped(guc),
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc),
HZ * 5);
- if (!ret) {
+ if (!ret && !vf_recovery(guc)) {
struct xe_gpu_scheduler *sched = &q->guc->sched;
xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
xe_sched_submission_start(sched);
xe_gt_reset_async(q->gt);
- xe_sched_tdr_queue_imm(sched);
+ if (!xe_exec_queue_is_lr(q))
+ xe_sched_tdr_queue_imm(sched);
return;
}
@@ -1051,9 +1032,14 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_gpu_scheduler *sched = &ge->sched;
+ struct xe_sched_job *job;
bool wedged = false;
xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
+
+ if (vf_recovery(guc))
+ return;
+
trace_xe_exec_queue_lr_cleanup(q);
if (!exec_queue_killed(q))
@@ -1086,7 +1072,11 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
*/
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- xe_guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if (vf_recovery(guc))
+ return;
+
if (!ret) {
xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
q->guc->id);
@@ -1101,7 +1091,16 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
+ xe_hw_fence_irq_stop(q->fence_irq);
+
xe_sched_submission_start(sched);
+
+ spin_lock(&sched->base.job_list_lock);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list)
+ xe_sched_job_set_error(job, -ECANCELED);
+ spin_unlock(&sched->base.job_list_lock);
+
+ xe_hw_fence_irq_start(q->fence_irq);
}
#define ADJUST_FIVE_PERCENT(__t) mul_u64_u32_div(__t, 105, 100)
@@ -1167,12 +1166,14 @@ static void enable_scheduling(struct xe_exec_queue *q)
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_enable(q) ||
- xe_guc_read_stopped(guc), HZ * 5);
- if (!ret || xe_guc_read_stopped(guc)) {
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if ((!ret && !vf_recovery(guc)) || xe_guc_read_stopped(guc)) {
xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
set_exec_queue_banned(q);
xe_gt_reset_async(q->gt);
- xe_sched_tdr_queue_imm(&q->guc->sched);
+ if (!xe_exec_queue_is_lr(q))
+ xe_sched_tdr_queue_imm(&q->guc->sched);
}
}
@@ -1230,13 +1231,16 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
int i = 0;
bool wedged = false, skip_timeout_check;
+ xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_lr(q));
+
/*
* TDR has fired before free job worker. Common if exec queue
* immediately closed after last fence signaled. Add back to pending
* list so job can be freed and kick scheduler ensuring free job is not
* lost.
*/
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags) ||
+ vf_recovery(guc))
return DRM_GPU_SCHED_STAT_NO_HANG;
/* Kill the run_job entry point */
@@ -1288,7 +1292,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
ret = wait_event_timeout(guc->ct.wq,
(!exec_queue_pending_enable(q) &&
!exec_queue_pending_disable(q)) ||
- xe_guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if (vf_recovery(guc))
+ goto handle_vf_resume;
if (!ret || xe_guc_read_stopped(guc))
goto trigger_reset;
@@ -1313,7 +1320,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
smp_rmb();
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- xe_guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if (vf_recovery(guc))
+ goto handle_vf_resume;
if (!ret || xe_guc_read_stopped(guc)) {
trigger_reset:
if (!ret)
@@ -1409,6 +1419,7 @@ trigger_reset:
return DRM_GPU_SCHED_STAT_RESET;
sched_enable:
+ set_exec_queue_pending_tdr_exit(q);
enable_scheduling(q);
rearm:
/*
@@ -1417,6 +1428,7 @@ rearm:
* some thought, do this in a follow up.
*/
xe_sched_submission_start(sched);
+handle_vf_resume:
return DRM_GPU_SCHED_STAT_NO_HANG;
}
@@ -1523,11 +1535,24 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
static void __suspend_fence_signal(struct xe_exec_queue *q)
{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+
if (!q->guc->suspend_pending)
return;
WRITE_ONCE(q->guc->suspend_pending, false);
- wake_up(&q->guc->suspend_wait);
+
+ /*
+ * We use a GuC shared wait queue for VFs because the VF resfix start
+ * interrupt must be able to wake all instances of suspend_wait. This
+ * prevents the VF migration worker from being starved during
+ * scheduling.
+ */
+ if (IS_SRIOV_VF(xe))
+ wake_up_all(&guc->ct.wq);
+ else
+ wake_up(&q->guc->suspend_wait);
}
static void suspend_fence_signal(struct xe_exec_queue *q)
@@ -1548,8 +1573,9 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
exec_queue_enabled(q)) {
- wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING ||
- xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q));
+ wait_event(guc->ct.wq, vf_recovery(guc) ||
+ ((q->guc->resume_time != RESUME_PENDING ||
+ xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)));
if (!xe_guc_read_stopped(guc)) {
s64 since_resume_ms =
@@ -1578,6 +1604,7 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
clear_exec_queue_suspended(q);
if (!exec_queue_enabled(q)) {
q->guc->resume_time = RESUME_PENDING;
+ set_exec_queue_pending_resume(q);
enable_scheduling(q);
}
} else {
@@ -1591,6 +1618,7 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
#define RESUME 4
#define OPCODE_MASK 0xf
#define MSG_LOCKED BIT(8)
+#define MSG_HEAD BIT(9)
static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
{
@@ -1653,7 +1681,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
- NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+ NULL, xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES, 64,
timeout, guc_to_gt(guc)->ordered_wq, NULL,
q->name, gt_to_xe(q->gt)->drm.dev);
if (err)
@@ -1675,7 +1703,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
q->entity = &ge->entity;
- if (xe_guc_read_stopped(guc))
+ if (xe_guc_read_stopped(guc) || vf_recovery(guc))
xe_sched_stop(sched);
mutex_unlock(&guc->submission_state.lock);
@@ -1715,12 +1743,24 @@ static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg
msg->private_data = q;
trace_xe_sched_msg_add(msg);
- if (opcode & MSG_LOCKED)
+ if (opcode & MSG_HEAD)
+ xe_sched_add_msg_head(&q->guc->sched, msg);
+ else if (opcode & MSG_LOCKED)
xe_sched_add_msg_locked(&q->guc->sched, msg);
else
xe_sched_add_msg(&q->guc->sched, msg);
}
+static void guc_exec_queue_try_add_msg_head(struct xe_exec_queue *q,
+ struct xe_sched_msg *msg,
+ u32 opcode)
+{
+ if (!list_empty(&msg->link))
+ return;
+
+ guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED | MSG_HEAD);
+}
+
static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
struct xe_sched_msg *msg,
u32 opcode)
@@ -1821,6 +1861,7 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
int ret;
/*
@@ -1828,11 +1869,21 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
* suspend_pending upon kill but to be paranoid but races in which
* suspend_pending is set after kill also check kill here.
*/
- ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
- !READ_ONCE(q->guc->suspend_pending) ||
- exec_queue_killed(q) ||
- xe_guc_read_stopped(guc),
- HZ * 5);
+#define WAIT_COND \
+ (!READ_ONCE(q->guc->suspend_pending) || exec_queue_killed(q) || \
+ xe_guc_read_stopped(guc))
+
+retry:
+ if (IS_SRIOV_VF(xe))
+ ret = wait_event_interruptible_timeout(guc->ct.wq, WAIT_COND ||
+ vf_recovery(guc),
+ HZ * 5);
+ else
+ ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
+ WAIT_COND, HZ * 5);
+
+ if (vf_recovery(guc) && !xe_device_wedged((guc_to_xe(guc))))
+ return -EAGAIN;
if (!ret) {
xe_gt_warn(guc_to_gt(guc),
@@ -1840,8 +1891,13 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
q->guc->id);
/* XXX: Trigger GT reset? */
return -ETIME;
+ } else if (IS_SRIOV_VF(xe) && !WAIT_COND) {
+ /* Corner case on RESFIX DONE where vf_recovery() changes */
+ goto retry;
}
+#undef WAIT_COND
+
return ret < 0 ? ret : 0;
}
@@ -1936,47 +1992,13 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
}
}
-/**
- * xe_guc_submit_reset_block - Disallow reset calls on given GuC.
- * @guc: the &xe_guc struct instance
- */
-int xe_guc_submit_reset_block(struct xe_guc *guc)
-{
- return atomic_fetch_or(1, &guc->submission_state.reset_blocked);
-}
-
-/**
- * xe_guc_submit_reset_unblock - Allow back reset calls on given GuC.
- * @guc: the &xe_guc struct instance
- */
-void xe_guc_submit_reset_unblock(struct xe_guc *guc)
-{
- atomic_set_release(&guc->submission_state.reset_blocked, 0);
- wake_up_all(&guc->ct.wq);
-}
-
-static int guc_submit_reset_is_blocked(struct xe_guc *guc)
-{
- return atomic_read_acquire(&guc->submission_state.reset_blocked);
-}
-
-/* Maximum time of blocking reset */
-#define RESET_BLOCK_PERIOD_MAX (HZ * 5)
-
-/**
- * xe_guc_wait_reset_unblock - Wait until reset blocking flag is lifted, or timeout.
- * @guc: the &xe_guc struct instance
- */
-int xe_guc_wait_reset_unblock(struct xe_guc *guc)
-{
- return wait_event_timeout(guc->ct.wq,
- !guc_submit_reset_is_blocked(guc), RESET_BLOCK_PERIOD_MAX);
-}
-
int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
+ if (xe_gt_WARN_ON(guc_to_gt(guc), vf_recovery(guc)))
+ return 0;
+
if (!guc->submission_state.initialized)
return 0;
@@ -2026,6 +2048,119 @@ void xe_guc_submit_stop(struct xe_guc *guc)
}
+static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
+ struct xe_exec_queue *q)
+{
+ bool pending_enable, pending_disable, pending_resume;
+
+ pending_enable = exec_queue_pending_enable(q);
+ pending_resume = exec_queue_pending_resume(q);
+
+ if (pending_enable && pending_resume) {
+ q->guc->needs_resume = true;
+ xe_gt_dbg(guc_to_gt(guc), "Replay RESUME - guc_id=%d",
+ q->guc->id);
+ }
+
+ if (pending_enable && !pending_resume &&
+ !exec_queue_pending_tdr_exit(q)) {
+ clear_exec_queue_registered(q);
+ if (xe_exec_queue_is_lr(q))
+ xe_exec_queue_put(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay REGISTER - guc_id=%d",
+ q->guc->id);
+ }
+
+ if (pending_enable) {
+ clear_exec_queue_enabled(q);
+ clear_exec_queue_pending_resume(q);
+ clear_exec_queue_pending_tdr_exit(q);
+ clear_exec_queue_pending_enable(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay ENABLE - guc_id=%d",
+ q->guc->id);
+ }
+
+ if (exec_queue_destroyed(q) && exec_queue_registered(q)) {
+ clear_exec_queue_destroyed(q);
+ if (exec_queue_extra_ref(q))
+ xe_exec_queue_put(q);
+ else
+ q->guc->needs_cleanup = true;
+ clear_exec_queue_extra_ref(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay CLEANUP - guc_id=%d",
+ q->guc->id);
+ }
+
+ pending_disable = exec_queue_pending_disable(q);
+
+ if (pending_disable && exec_queue_suspended(q)) {
+ clear_exec_queue_suspended(q);
+ q->guc->needs_suspend = true;
+ xe_gt_dbg(guc_to_gt(guc), "Replay SUSPEND - guc_id=%d",
+ q->guc->id);
+ }
+
+ if (pending_disable) {
+ if (!pending_enable)
+ set_exec_queue_enabled(q);
+ clear_exec_queue_pending_disable(q);
+ clear_exec_queue_check_timeout(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay DISABLE - guc_id=%d",
+ q->guc->id);
+ }
+
+ q->guc->resume_time = 0;
+}
+
+/*
+ * This function is quite complex, but it is the only real way to ensure no
+ * state is lost during VF resume flows. It scans the queue state, makes
+ * adjustments as needed, and queues jobs / messages which are replayed upon
+ * unpause.
+ */
+static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_job *job;
+ int i;
+
+ lockdep_assert_held(&guc->submission_state.lock);
+
+ /* Stop scheduling + flush any DRM scheduler operations */
+ xe_sched_submission_stop(sched);
+ if (xe_exec_queue_is_lr(q))
+ cancel_work_sync(&q->guc->lr_tdr);
+ else
+ cancel_delayed_work_sync(&sched->base.work_tdr);
+
+ guc_exec_queue_revert_pending_state_change(guc, q);
+
+ if (xe_exec_queue_is_parallel(q)) {
+ struct xe_device *xe = guc_to_xe(guc);
+ struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
+
+ /*
+ * NOP existing WQ commands that may contain stale GGTT
+ * addresses. These will be replayed upon unpause. The hardware
+ * seems to get confused if the WQ head/tail pointers are
+ * adjusted.
+ */
+ for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
+ parallel_write(xe, map, wq[i],
+ FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+ FIELD_PREP(WQ_LEN_MASK, 0));
+ }
+
+ job = xe_sched_first_pending_job(sched);
+ if (job) {
+ /*
+ * Adjust software tail so jobs submitted overwrite previous
+ * position in ring buffer with new GGTT addresses.
+ */
+ for (i = 0; i < q->width; ++i)
+ q->lrc[i]->ring.tail = job->ptrs[i].head;
+ }
+}
+
/**
* xe_guc_submit_pause - Stop further runs of submission tasks on given GuC.
* @guc: the &xe_guc struct instance whose scheduler is to be disabled
@@ -2035,8 +2170,17 @@ void xe_guc_submit_pause(struct xe_guc *guc)
struct xe_exec_queue *q;
unsigned long index;
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
- xe_sched_submission_stop_async(&q->guc->sched);
+ xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
+ guc_exec_queue_pause(guc, q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
}
static void guc_exec_queue_start(struct xe_exec_queue *q)
@@ -2044,11 +2188,25 @@ static void guc_exec_queue_start(struct xe_exec_queue *q)
struct xe_gpu_scheduler *sched = &q->guc->sched;
if (!exec_queue_killed_or_banned_or_wedged(q)) {
+ struct xe_sched_job *job = xe_sched_first_pending_job(sched);
int i;
trace_xe_exec_queue_resubmit(q);
- for (i = 0; i < q->width; ++i)
- xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);
+ if (job) {
+ for (i = 0; i < q->width; ++i) {
+				/*
+				 * The GuC context is unregistered at this
+				 * point in time; adjusting the software ring
+				 * tail ensures jobs are rewritten in their
+				 * original placement, and adjusting the LRC
+				 * tail ensures the newly loaded GuC / contexts
+				 * only see the LRC tail increasing as jobs are
+				 * written out.
+				 */
+ q->lrc[i]->ring.tail = job->ptrs[i].head;
+ xe_lrc_set_ring_tail(q->lrc[i],
+ xe_lrc_ring_head(q->lrc[i]));
+ }
+ }
xe_sched_resubmit_jobs(sched);
}
@@ -2079,11 +2237,100 @@ int xe_guc_submit_start(struct xe_guc *guc)
return 0;
}
-static void guc_exec_queue_unpause(struct xe_exec_queue *q)
+static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
+ struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct drm_sched_job *s_job;
+ struct xe_sched_job *job = NULL;
+
+ list_for_each_entry(s_job, &sched->base.pending_list, list) {
+ job = to_xe_sched_job(s_job);
+
+ xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
+ q->guc->id, xe_sched_job_seqno(job));
+
+ q->ring_ops->emit_job(job);
+ job->skip_emit = true;
+ }
+ if (job)
+ job->last_replay = true;
+}
+
+/**
+ * xe_guc_submit_unpause_prepare - Prepare to unpause submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be prepared for unpause
+ */
+void xe_guc_submit_unpause_prepare(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
+ guc_exec_queue_unpause_prepare(guc, q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
+
+static void guc_exec_queue_replay_pending_state_change(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_msg *msg;
+
+ if (q->guc->needs_cleanup) {
+ msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
+
+ guc_exec_queue_add_msg(q, msg, CLEANUP);
+ q->guc->needs_cleanup = false;
+ }
+
+ if (q->guc->needs_suspend) {
+ msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
+
+ xe_sched_msg_lock(sched);
+ guc_exec_queue_try_add_msg_head(q, msg, SUSPEND);
+ xe_sched_msg_unlock(sched);
+
+ q->guc->needs_suspend = false;
+ }
+
+	/*
+	 * The resume must be in the message queue before the suspend, as a
+	 * resume cannot be issued while a suspend is pending, but the
+	 * inverse is possible.
+	 */
+ if (q->guc->needs_resume) {
+ msg = q->guc->static_msgs + STATIC_MSG_RESUME;
+
+ xe_sched_msg_lock(sched);
+ guc_exec_queue_try_add_msg_head(q, msg, RESUME);
+ xe_sched_msg_unlock(sched);
+
+ q->guc->needs_resume = false;
+ }
+}
+
+static void guc_exec_queue_unpause(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ bool needs_tdr = exec_queue_killed_or_banned_or_wedged(q);
+
+ lockdep_assert_held(&guc->submission_state.lock);
+
+ xe_sched_resubmit_jobs(sched);
+ guc_exec_queue_replay_pending_state_change(q);
xe_sched_submission_start(sched);
+ if (needs_tdr)
+ xe_guc_exec_queue_trigger_cleanup(q);
+ xe_sched_submission_resume_tdr(sched);
}
/**
@@ -2095,10 +2342,43 @@ void xe_guc_submit_unpause(struct xe_guc *guc)
struct xe_exec_queue *q;
unsigned long index;
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
- guc_exec_queue_unpause(q);
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /*
+ * Prevent redundant attempts to stop parallel queues, or queues
+ * created after resfix done.
+ */
+ if (q->guc->id != index ||
+ !READ_ONCE(q->guc->sched.base.pause_submit))
+ continue;
- wake_up_all(&guc->ct.wq);
+ guc_exec_queue_unpause(guc, q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
+
+/**
+ * xe_guc_submit_pause_abort - Abort all paused submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be aborted
+ */
+void xe_guc_submit_pause_abort(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
+ xe_sched_submission_start(sched);
+ if (exec_queue_killed_or_banned_or_wedged(q))
+ xe_guc_exec_queue_trigger_cleanup(q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
}
static struct xe_exec_queue *
@@ -2150,6 +2430,8 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q));
q->guc->resume_time = ktime_get();
+ clear_exec_queue_pending_resume(q);
+ clear_exec_queue_pending_tdr_exit(q);
clear_exec_queue_pending_enable(q);
smp_wmb();
wake_up_all(&guc->ct.wq);
@@ -2677,13 +2959,13 @@ int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch)
mutex_lock(&guc->submission_state.lock);
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
err = xe_exec_queue_contexts_hwsp_rebase(q, scratch);
if (err)
break;
- if (xe_exec_queue_is_parallel(q))
- err = wq_items_rebase(q);
- if (err)
- break;
}
mutex_unlock(&guc->submission_state.lock);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 78c3f07e31a0..b49a2748ec46 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -22,9 +22,8 @@ void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
void xe_guc_submit_pause(struct xe_guc *guc);
void xe_guc_submit_unpause(struct xe_guc *guc);
-int xe_guc_submit_reset_block(struct xe_guc *guc);
-void xe_guc_submit_reset_unblock(struct xe_guc *guc);
-int xe_guc_wait_reset_unblock(struct xe_guc *guc);
+void xe_guc_submit_unpause_prepare(struct xe_guc *guc);
+void xe_guc_submit_pause_abort(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
int xe_guc_read_stopped(struct xe_guc *guc);
@@ -36,8 +35,6 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
-void xe_guc_jobs_ring_rebase(struct xe_guc *guc);
-
struct xe_guc_submit_exec_queue_snapshot *
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
void
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index 7e43b2dd6a32..0a70c8924582 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -66,14 +66,18 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
int xe_huc_init(struct xe_huc *huc)
{
struct xe_gt *gt = huc_to_gt(huc);
- struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
int ret;
huc->fw.type = XE_UC_FW_TYPE_HUC;
- /* On platforms with a media GT the HuC is only available there */
- if (tile->media_gt && (gt != tile->media_gt)) {
+ /*
+	 * The HuC is only available on the media GT on most platforms. The
+	 * exceptions to that rule are the old Xe1 platforms, where there was
+ * no separate GT for media IP, so the HuC was part of the primary
+ * GT. Such platforms have graphics versions 12.55 and earlier.
+ */
+ if (!xe_gt_is_media_type(gt) && GRAPHICS_VERx100(xe) > 1255) {
xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 1cf623b4a5bc..6a9e2a4272dd 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -346,17 +346,26 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}
-static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
+static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
+ /*
+ * Xe3p no longer supports load balance mode, so "fixed cslice" mode
+ * is automatic and no RCU_MODE programming is required.
+ */
+ if (GRAPHICS_VER(gt_to_xe(gt)) >= 35)
+ return false;
+
return xe_gt_ccs_mode_enabled(gt) &&
- xe_rtp_match_first_render_or_compute(gt, hwe);
+ xe_rtp_match_first_render_or_compute(xe, gt, hwe);
}
-static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
+static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
- if (GRAPHICS_VER(gt_to_xe(gt)) < 20)
+ if (GRAPHICS_VER(xe) < 20)
return false;
if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
@@ -709,27 +718,52 @@ static void read_media_fuses(struct xe_gt *gt)
}
}
+static u32 infer_svccopy_from_meml3(struct xe_gt *gt)
+{
+ u32 meml3 = REG_FIELD_GET(MEML3_EN_MASK,
+ xe_mmio_read32(&gt->mmio, MIRROR_FUSE3));
+ u32 svccopy_mask = 0;
+
+ /*
+ * Each of the four meml3 bits determines the fusing of two service
+ * copy engines.
+ */
+ for (int i = 0; i < 4; i++)
+ svccopy_mask |= (meml3 & BIT(i)) ? 0b11 << 2 * i : 0;
+
+ return svccopy_mask;
+}
+
+static u32 read_svccopy_fuses(struct xe_gt *gt)
+{
+ return REG_FIELD_GET(FUSE_SERVICE_COPY_ENABLE_MASK,
+ xe_mmio_read32(&gt->mmio, SERVICE_COPY_ENABLE));
+}
+
static void read_copy_fuses(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
u32 bcs_mask;
- if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
- return;
-
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
- bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);
+ if (GRAPHICS_VER(xe) >= 35)
+ bcs_mask = read_svccopy_fuses(gt);
+ else if (GRAPHICS_VERx100(xe) == 1260)
+ bcs_mask = infer_svccopy_from_meml3(gt);
+ else
+ return;
- /* BCS0 is always present; only BCS1-BCS8 may be fused off */
- for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
+ /* Only BCS1-BCS8 may be fused off */
+ bcs_mask <<= XE_HW_ENGINE_BCS1;
+ for (int i = XE_HW_ENGINE_BCS1; i <= XE_HW_ENGINE_BCS8; ++i) {
if (!(gt->info.engine_mask & BIT(i)))
continue;
- if (!(BIT(j / 2) & bcs_mask)) {
+ if (!(bcs_mask & BIT(i))) {
gt->info.engine_mask &= ~BIT(i);
- xe_gt_info(gt, "bcs%u fused off\n", j);
+ xe_gt_info(gt, "bcs%u fused off\n",
+ i - XE_HW_ENGINE_BCS0);
}
}
}
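On the MEML3-derived path above, each of the four fuse bits enables a consecutive pair of service-copy engines (bit 0 covers BCS1/BCS2, bit 1 covers BCS3/BCS4, and so on). A freestanding illustration of that expansion, with example values only:

#include <stdint.h>
#include <stdio.h>

/* Expand each of the four meml3 bits into a pair of service-copy bits,
 * mirroring the loop above (illustration only). */
static uint32_t svccopy_from_meml3(uint32_t meml3)
{
	uint32_t mask = 0;

	for (int i = 0; i < 4; i++)
		if (meml3 & (1u << i))
			mask |= 0x3u << (2 * i);
	return mask;
}

int main(void)
{
	/* meml3 = 0b0101 -> 0b00110011: BCS1/BCS2 and BCS5/BCS6 present,
	 * BCS3/BCS4 and BCS7/BCS8 fused off. */
	printf("0x%02x\n", (unsigned)svccopy_from_meml3(0x5)); /* prints 0x33 */
	return 0;
}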
@@ -870,7 +904,7 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
if (hwe->irq_handler)
hwe->irq_handler(hwe, intr_vec);
- if (intr_vec & GT_RENDER_USER_INTERRUPT)
+ if (intr_vec & GT_MI_USER_INTERRUPT)
xe_hw_fence_irq_run(hwe->fence_irq);
}
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index b6790589e623..97879daeefc1 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -658,8 +658,6 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct xe_reg rapl_limit;
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- xe_pm_runtime_get(hwmon->xe);
-
if (hwmon->xe->info.has_mbx_power_limits) {
xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
} else if (power_attr != PL2_HWMON_ATTR) {
@@ -669,8 +667,6 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
}
ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
- xe_pm_runtime_put(hwmon->xe);
-
return ret;
}
@@ -1096,8 +1092,6 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
int ret;
- xe_pm_runtime_get(hwmon->xe);
-
switch (type) {
case hwmon_temp:
ret = xe_hwmon_temp_is_visible(hwmon, attr, channel);
@@ -1122,8 +1116,6 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
break;
}
- xe_pm_runtime_put(hwmon->xe);
-
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c
index 48dfcb41fa08..0b5452be0c87 100644
--- a/drivers/gpu/drm/xe/xe_i2c.c
+++ b/drivers/gpu/drm/xe/xe_i2c.c
@@ -160,6 +160,11 @@ bool xe_i2c_present(struct xe_device *xe)
return xe->i2c && xe->i2c->ep.cookie == XE_I2C_EP_COOKIE_DEVICE;
}
+static bool xe_i2c_irq_present(struct xe_device *xe)
+{
+ return xe->i2c && xe->i2c->adapter_irq;
+}
+
/**
* xe_i2c_irq_handler: Handler for I2C interrupts
* @xe: xe device instance
@@ -170,13 +175,33 @@ bool xe_i2c_present(struct xe_device *xe)
*/
void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl)
{
- if (!xe->i2c || !xe->i2c->adapter_irq)
+ if (!xe_i2c_irq_present(xe))
return;
if (master_ctl & I2C_IRQ)
generic_handle_irq_safe(xe->i2c->adapter_irq);
}
+void xe_i2c_irq_reset(struct xe_device *xe)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe_i2c_irq_present(xe))
+ return;
+
+ xe_mmio_rmw32(mmio, I2C_BRIDGE_PCICFGCTL, ACPI_INTR_EN, 0);
+}
+
+void xe_i2c_irq_postinstall(struct xe_device *xe)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe_i2c_irq_present(xe))
+ return;
+
+ xe_mmio_rmw32(mmio, I2C_BRIDGE_PCICFGCTL, 0, ACPI_INTR_EN);
+}
+
static int xe_i2c_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw_irq_num)
{
@@ -334,6 +359,7 @@ int xe_i2c_probe(struct xe_device *xe)
if (ret)
goto err_remove_irq;
+ xe_i2c_irq_postinstall(xe);
return devm_add_action_or_reset(drm_dev, xe_i2c_remove, i2c);
err_remove_irq:
diff --git a/drivers/gpu/drm/xe/xe_i2c.h b/drivers/gpu/drm/xe/xe_i2c.h
index ecd5f10358e2..425d8160835f 100644
--- a/drivers/gpu/drm/xe/xe_i2c.h
+++ b/drivers/gpu/drm/xe/xe_i2c.h
@@ -51,12 +51,16 @@ struct xe_i2c {
int xe_i2c_probe(struct xe_device *xe);
bool xe_i2c_present(struct xe_device *xe);
void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl);
+void xe_i2c_irq_postinstall(struct xe_device *xe);
+void xe_i2c_irq_reset(struct xe_device *xe);
void xe_i2c_pm_suspend(struct xe_device *xe);
void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold);
#else
static inline int xe_i2c_probe(struct xe_device *xe) { return 0; }
static inline bool xe_i2c_present(struct xe_device *xe) { return false; }
static inline void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { }
+static inline void xe_i2c_irq_postinstall(struct xe_device *xe) { }
+static inline void xe_i2c_irq_reset(struct xe_device *xe) { }
static inline void xe_i2c_pm_suspend(struct xe_device *xe) { }
static inline void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) { }
#endif
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 870edaf69388..e5ed0242f7b1 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -139,68 +139,112 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_mmio *mmio = &gt->mmio;
- u32 ccs_mask, bcs_mask;
- u32 irqs, dmask, smask;
- u32 gsc_mask = 0;
- u32 heci_mask = 0;
+ u32 common_mask, val, gsc_mask = 0, heci_mask = 0,
+ rcs_mask = 0, bcs_mask = 0, vcs_mask = 0, vecs_mask = 0,
+ ccs_mask = 0;
if (xe_device_uses_memirq(xe))
return;
if (xe_device_uc_enabled(xe)) {
- irqs = GT_RENDER_USER_INTERRUPT |
- GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
+ common_mask = GT_MI_USER_INTERRUPT |
+ GT_FLUSH_COMPLETE_INTERRUPT;
+
+ /* Enable Compute Walker Interrupt for non-MSIX platforms */
+ if (GRAPHICS_VERx100(xe) >= 3511 && !xe_device_has_msix(xe)) {
+ rcs_mask |= GT_COMPUTE_WALKER_INTERRUPT;
+ ccs_mask |= GT_COMPUTE_WALKER_INTERRUPT;
+ }
} else {
- irqs = GT_RENDER_USER_INTERRUPT |
- GT_CS_MASTER_ERROR_INTERRUPT |
- GT_CONTEXT_SWITCH_INTERRUPT |
- GT_WAIT_SEMAPHORE_INTERRUPT;
+ common_mask = GT_MI_USER_INTERRUPT |
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
}
- ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
- bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
-
- dmask = irqs << 16 | irqs;
- smask = irqs << 16;
+ rcs_mask |= common_mask;
+ bcs_mask |= common_mask;
+ vcs_mask |= common_mask;
+ vecs_mask |= common_mask;
+ ccs_mask |= common_mask;
if (xe_gt_is_main_type(gt)) {
+ /*
+ * For enabling the interrupts, the information about fused-off
+ * engines doesn't matter much, but it also lets us check whether
+ * the engine class is architecturally present on the platform.
+ */
+ u32 ccs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
+ u32 bcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
+
/* Enable interrupts for each engine class */
- xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
- if (ccs_mask)
- xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);
+ xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE,
+ REG_FIELD_PREP(ENGINE1_MASK, rcs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, bcs_mask));
+ if (ccs_fuse_mask)
+ xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE,
+ REG_FIELD_PREP(ENGINE1_MASK, ccs_mask));
/* Unmask interrupts for each engine instance */
- xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
- xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
- if (bcs_mask & (BIT(1)|BIT(2)))
- xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
- if (bcs_mask & (BIT(3)|BIT(4)))
- xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
- if (bcs_mask & (BIT(5)|BIT(6)))
- xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
- if (bcs_mask & (BIT(7)|BIT(8)))
- xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
- if (ccs_mask & (BIT(0)|BIT(1)))
- xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
- if (ccs_mask & (BIT(2)|BIT(3)))
- xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
+ val = ~REG_FIELD_PREP(ENGINE1_MASK, rcs_mask);
+ xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, val);
+ val = ~REG_FIELD_PREP(ENGINE1_MASK, bcs_mask);
+ xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, val);
+
+ val = ~(REG_FIELD_PREP(ENGINE1_MASK, bcs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, bcs_mask));
+ if (bcs_fuse_mask & (BIT(1)|BIT(2)))
+ xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, val);
+ if (bcs_fuse_mask & (BIT(3)|BIT(4)))
+ xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, val);
+ if (bcs_fuse_mask & (BIT(5)|BIT(6)))
+ xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, val);
+ if (bcs_fuse_mask & (BIT(7)|BIT(8)))
+ xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, val);
+
+ val = ~(REG_FIELD_PREP(ENGINE1_MASK, ccs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, ccs_mask));
+ if (ccs_fuse_mask & (BIT(0)|BIT(1)))
+ xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, val);
+ if (ccs_fuse_mask & (BIT(2)|BIT(3)))
+ xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, val);
}
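As a side note, a tiny userspace sketch of the per-class packing used above, assuming ENGINE1_MASK covers bits 31:16 and ENGINE0_MASK bits 15:0 (as the replaced "dmask = irqs << 16 | irqs" construction suggests); values are hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	/* Pack two per-class interrupt masks into one 32-bit enable value. */
	static uint32_t pack_intr_enable(uint16_t engine1_bits, uint16_t engine0_bits)
	{
		return ((uint32_t)engine1_bits << 16) | engine0_bits;
	}

	int main(void)
	{
		uint16_t rcs = 0x0101, bcs = 0x0101;	/* hypothetical per-class masks */

		printf("RENDER_COPY_INTR_ENABLE <- 0x%08x\n",
		       pack_intr_enable(rcs, bcs));
		return 0;
	}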
if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
+ u32 vcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
+ u32 vecs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
+ u32 other_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER);
+
/* Enable interrupts for each engine class */
- xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);
+ xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE,
+ REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, vecs_mask));
/* Unmask interrupts for each engine instance */
- xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
- xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
- xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);
+ val = ~(REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, vcs_mask));
+ if (vcs_fuse_mask & (BIT(0) | BIT(1)))
+ xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, val);
+ if (vcs_fuse_mask & (BIT(2) | BIT(3)))
+ xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, val);
+ if (vcs_fuse_mask & (BIT(4) | BIT(5)))
+ xe_mmio_write32(mmio, VCS4_VCS5_INTR_MASK, val);
+ if (vcs_fuse_mask & (BIT(6) | BIT(7)))
+ xe_mmio_write32(mmio, VCS6_VCS7_INTR_MASK, val);
+
+ val = ~(REG_FIELD_PREP(ENGINE1_MASK, vecs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, vecs_mask));
+ if (vecs_fuse_mask & (BIT(0) | BIT(1)))
+ xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, val);
+ if (vecs_fuse_mask & (BIT(2) | BIT(3)))
+ xe_mmio_write32(mmio, VECS2_VECS3_INTR_MASK, val);
/*
* the heci2 interrupt is enabled via the same register as the
* GSCCS interrupts, but it has its own mask register.
*/
- if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
- gsc_mask = irqs | GSC_ER_COMPLETE;
+ if (other_fuse_mask) {
+ gsc_mask = common_mask | GSC_ER_COMPLETE;
heci_mask = GSC_IRQ_INTF(1);
} else if (xe->info.has_heci_gscfi) {
gsc_mask = GSC_IRQ_INTF(1);
@@ -494,11 +538,15 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
static void gt_irq_reset(struct xe_tile *tile)
{
struct xe_mmio *mmio = &tile->mmio;
-
- u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
- XE_ENGINE_CLASS_COMPUTE);
- u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
- XE_ENGINE_CLASS_COPY);
+ u32 ccs_mask = ~0;
+ u32 bcs_mask = ~0;
+
+ if (tile->primary_gt) {
+ ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
+ XE_ENGINE_CLASS_COMPUTE);
+ bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
+ XE_ENGINE_CLASS_COPY);
+ }
/* Disable RCS, BCS, VCS and VECS class engines. */
xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
@@ -616,6 +664,7 @@ static void xe_irq_reset(struct xe_device *xe)
tile = xe_device_get_root_tile(xe);
mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
xe_display_irq_reset(xe);
+ xe_i2c_irq_reset(xe);
/*
* The tile's top-level status register should be the last one
@@ -656,7 +705,8 @@ static void xe_irq_postinstall(struct xe_device *xe)
xe_memirq_postinstall(&tile->memirq);
}
- xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
+ xe_display_irq_postinstall(xe);
+ xe_i2c_irq_postinstall(xe);
/*
* ASLE backlight operations are reported via GUnit GSE interrupts
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 62fc5a1a332d..4dc1de482eee 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -17,7 +17,7 @@
#include "xe_mmio.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
-#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_printk.h"
/**
* DOC: Local Memory Translation Table
@@ -32,7 +32,7 @@
*/
#define lmtt_assert(lmtt, condition) xe_tile_assert(lmtt_to_tile(lmtt), condition)
-#define lmtt_debug(lmtt, msg...) xe_sriov_dbg_verbose(lmtt_to_xe(lmtt), "LMTT: " msg)
+#define lmtt_debug(lmtt, msg...) xe_tile_sriov_dbg_verbose(lmtt_to_tile(lmtt), "LMTT: " msg)
static bool xe_has_multi_level_lmtt(struct xe_device *xe)
{
@@ -267,15 +267,14 @@ static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
*/
void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
- struct xe_device *xe = lmtt_to_xe(lmtt);
int err;
- lmtt_assert(lmtt, IS_SRIOV_PF(xe));
+ lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
err = lmtt_invalidate_hw(lmtt);
if (err)
- xe_sriov_warn(xe, "LMTT%u invalidation failed (%pe)",
- lmtt_to_tile(lmtt)->id, ERR_PTR(err));
+ xe_tile_sriov_err(lmtt_to_tile(lmtt), "LMTT invalidation failed (%pe)",
+ ERR_PTR(err));
}
static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 47e9df775072..b5083c99dd50 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1214,8 +1214,7 @@ static int setup_bo(struct bo_setup_state *state)
ssize_t remain;
if (state->lrc->bo->vmap.is_iomem) {
- if (!state->buffer)
- return -ENOMEM;
+ xe_gt_assert(state->hwe->gt, state->buffer);
state->ptr = state->buffer;
} else {
state->ptr = state->lrc->bo->vmap.vaddr + state->offset;
@@ -1248,7 +1247,7 @@ fail:
static void finish_bo(struct bo_setup_state *state)
{
- if (!state->buffer)
+ if (!state->lrc->bo->vmap.is_iomem)
return;
xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap,
@@ -1303,8 +1302,11 @@ static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
u32 *buf = NULL;
int ret;
- if (lrc->bo->vmap.is_iomem)
+ if (lrc->bo->vmap.is_iomem) {
buf = kmalloc(LRC_WA_BB_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
ret = xe_lrc_setup_wa_bb_with_scratch(lrc, hwe, buf);
@@ -1347,8 +1349,11 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
if (xe_gt_WARN_ON(lrc->gt, !state.funcs))
return 0;
- if (lrc->bo->vmap.is_iomem)
+ if (lrc->bo->vmap.is_iomem) {
state.buffer = kmalloc(state.max_size, GFP_KERNEL);
+ if (!state.buffer)
+ return -ENOMEM;
+ }
ret = setup_bo(&state);
if (ret) {
@@ -1412,8 +1417,9 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_INVALIDATE;
- if (vm && vm->xef) /* userspace */
- bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
+ if ((vm && vm->xef) || init_flags & XE_LRC_CREATE_USER_CTX) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE | XE_BO_FLAG_FORCE_USER_VRAM;
lrc->bo = xe_bo_create_pin_map_novm(xe, tile,
bo_size,
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 188565465779..2fb628da5c43 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -44,8 +44,10 @@ struct xe_lrc_snapshot {
#define LRC_WA_BB_SIZE SZ_4K
-#define XE_LRC_CREATE_RUNALONE 0x1
-#define XE_LRC_CREATE_PXP 0x2
+#define XE_LRC_CREATE_RUNALONE BIT(0)
+#define XE_LRC_CREATE_PXP BIT(1)
+#define XE_LRC_CREATE_USER_CTX BIT(2)
+
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
u32 ring_size, u16 msix_vec, u32 flags);
void xe_lrc_destroy(struct kref *ref);
@@ -74,6 +76,16 @@ static inline void xe_lrc_put(struct xe_lrc *lrc)
kref_put(&lrc->refcount, xe_lrc_destroy);
}
+/**
+ * xe_lrc_ring_size() - Xe LRC ring size
+ *
+ * Return: Size of LRC ring buffer
+ */
+static inline size_t xe_lrc_ring_size(void)
+{
+ return SZ_16K;
+}
+
size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class);
u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc);
u32 xe_lrc_regs_offset(struct xe_lrc *lrc);
diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
index 8d67f6ba2d95..f62e0c8b67ab 100644
--- a/drivers/gpu/drm/xe/xe_map.h
+++ b/drivers/gpu/drm/xe/xe_map.h
@@ -78,24 +78,6 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
iosys_map_wr(map__, offset__, type__, val__); \
})
-#define xe_map_rd_array(xe__, map__, index__, type__) \
- xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__)
-
-#define xe_map_wr_array(xe__, map__, index__, type__, val__) \
- xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__)
-
-#define xe_map_rd_array_u32(xe__, map__, index__) \
- xe_map_rd_array(xe__, map__, index__, u32)
-
-#define xe_map_wr_array_u32(xe__, map__, index__, val__) \
- xe_map_wr_array(xe__, map__, index__, u32, val__)
-
-#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \
- xe_map_rd_array_u32(xe__, map__, (index__) % (size__))
-
-#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \
- xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__)
-
#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \
struct xe_device *__xe = xe__; \
xe_device_assert_mem_access(__xe); \
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 49c45ec3e83c..b0c7ce0a5d1e 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -14,16 +14,15 @@
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_gt.h"
-#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_memirq.h"
+#include "xe_tile_printk.h"
#define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition)
#define memirq_printk(m, _level, _fmt, ...) \
- drm_##_level(&memirq_to_xe(m)->drm, "MEMIRQ%u: " _fmt, \
- memirq_to_tile(m)->id, ##__VA_ARGS__)
+ xe_tile_##_level(memirq_to_tile(m), "MEMIRQ: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_XE_DEBUG_MEMIRQ
#define memirq_debug(m, _fmt, ...) memirq_printk(m, dbg, _fmt, ##__VA_ARGS__)
@@ -398,8 +397,9 @@ void xe_memirq_postinstall(struct xe_memirq *memirq)
memirq_set_enable(memirq, true);
}
-static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
- u16 offset, const char *name)
+static bool __memirq_received(struct xe_memirq *memirq,
+ struct iosys_map *vector, u16 offset,
+ const char *name, bool clear)
{
u8 value;
@@ -409,19 +409,33 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
memirq_err_ratelimited(memirq,
"Unexpected memirq value %#x from %s at %u\n",
value, name, offset);
- iosys_map_wr(vector, offset, u8, 0x00);
+ if (clear)
+ iosys_map_wr(vector, offset, u8, 0x00);
}
return value;
}
+static bool memirq_received_noclear(struct xe_memirq *memirq,
+ struct iosys_map *vector,
+ u16 offset, const char *name)
+{
+ return __memirq_received(memirq, vector, offset, name, false);
+}
+
+static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
+ u16 offset, const char *name)
+{
+ return __memirq_received(memirq, vector, offset, name, true);
+}
+
static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
struct xe_hw_engine *hwe)
{
memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);
- if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name))
- xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT);
+ if (memirq_received(memirq, status, ilog2(GT_MI_USER_INTERRUPT), hwe->name))
+ xe_hw_engine_handle_irq(hwe, GT_MI_USER_INTERRUPT);
}
static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status,
@@ -434,8 +448,16 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat
if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
- if (memirq_received(memirq, status, ilog2(GUC_INTR_SW_INT_0), name))
+ /*
+ * This is a software interrupt that must only be cleared after it has
+ * been consumed, to avoid a race where xe_gt_sriov_vf_recovery_pending()
+ * would incorrectly return false.
+ */
+ if (memirq_received_noclear(memirq, status, ilog2(GUC_INTR_SW_INT_0),
+ name)) {
xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0);
+ iosys_map_wr(status, ilog2(GUC_INTR_SW_INT_0), u8, 0x00);
+ }
}
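A userspace sketch of the ordering enforced above for GUC_INTR_SW_INT_0: the status byte is read without clearing, the handler runs, and only then is the byte cleared, so a concurrent "recovery pending?" check reading the same byte cannot see it cleared while the interrupt is still being handled. Illustrative only; names are simplified:

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned char sw_int_0_status = 1;	/* pending */

	static bool recovery_pending(void)
	{
		return sw_int_0_status != 0;	/* mirrors the "noclear" read */
	}

	static void dispatch_sw_int_0(void)
	{
		if (!recovery_pending())
			return;

		/* ... handler consumes the interrupt here ... */
		printf("pending while handling: %d\n", recovery_pending());

		sw_int_0_status = 0;		/* cleared only afterwards */
	}

	int main(void)
	{
		dispatch_sw_int_0();
		printf("pending after handling: %d\n", recovery_pending());
		return 0;
	}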
/**
@@ -461,6 +483,23 @@ void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
}
/**
+ * xe_memirq_guc_sw_int_0_irq_pending() - SW_INT_0 IRQ is pending
+ * @memirq: the &xe_memirq
+ * @guc: the &xe_guc to check for IRQ
+ *
+ * Return: True if SW_INT_0 IRQ is pending on @guc, False otherwise
+ */
+bool xe_memirq_guc_sw_int_0_irq_pending(struct xe_memirq *memirq, struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 offset = xe_gt_is_media_type(gt) ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
+ struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&memirq->status, offset * SZ_16);
+
+ return memirq_received_noclear(memirq, &map, ilog2(GUC_INTR_SW_INT_0),
+ guc_name(guc));
+}
+
+/**
* xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
* @memirq: the &xe_memirq
*
diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h
index 06130650e9d6..e25d2234ab87 100644
--- a/drivers/gpu/drm/xe/xe_memirq.h
+++ b/drivers/gpu/drm/xe/xe_memirq.h
@@ -25,4 +25,6 @@ void xe_memirq_handler(struct xe_memirq *memirq);
int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc);
+bool xe_memirq_guc_sw_int_0_irq_pending(struct xe_memirq *memirq, struct xe_guc *guc);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index a36ce7dce8cc..3112c966c67d 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -57,6 +57,13 @@ struct xe_migrate {
u64 usm_batch_base_ofs;
/** @cleared_mem_ofs: VM offset of @cleared_bo. */
u64 cleared_mem_ofs;
+ /** @large_page_copy_ofs: VM offset of 2M pages used for large copies */
+ u64 large_page_copy_ofs;
+ /**
+ * @large_page_copy_pdes: BO offset at which to write out the 2M PDEs used
+ * for large copies
+ */
+ u64 large_page_copy_pdes;
/**
* @fence: dma-fence representing the last migration job batch.
* Protected by @job_mutex.
@@ -288,6 +295,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
(i + 1) * 8, u64, entry);
}
+ /* Reserve 2M PDEs */
+ level = 1;
+ m->large_page_copy_ofs = NUM_PT_SLOTS << xe_pt_shift(level);
+ m->large_page_copy_pdes = map_ofs + XE_PAGE_SIZE * level +
+ NUM_PT_SLOTS * 8;
+
/* Set up a 1GiB NULL mapping at 255GiB offset. */
level = 2;
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
@@ -980,15 +993,27 @@ struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
return migrate->q->lrc[0];
}
-static int emit_flush_invalidate(struct xe_exec_queue *q, u32 *dw, int i,
- u32 flags)
+static u64 migrate_vm_ppgtt_addr_tlb_inval(void)
+{
+ /*
+ * The migrate VM is self-referential so it can modify its own PTEs (see
+ * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE
+ * entries for kernel operations (copies, clears, CCS migrate), and
+ * suballocate the rest to user operations (binds/unbinds). With
+ * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates,
+ * so assign NUM_KERNEL_PDE - 2 for TLB invalidation.
+ */
+ return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE;
+}
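A worked example of the address returned by migrate_vm_ppgtt_addr_tlb_inval(), assuming NUM_KERNEL_PDE = 15 (as stated in the comment) and 4 KiB GPU pages; the constants are restated here purely for illustration:

	#include <stdio.h>

	#define NUM_KERNEL_PDE	15
	#define XE_PAGE_SIZE	4096UL

	int main(void)
	{
		/* slot NUM_KERNEL_PDE - 2 sits at 13 * 4096 = 0xd000 */
		printf("TLB invalidation scratch address: 0x%lx\n",
		       (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE);
		return 0;
	}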
+
+static int emit_flush_invalidate(u32 *dw, int i, u32 flags)
{
- struct xe_lrc *lrc = xe_exec_queue_lrc(q);
+ u64 addr = migrate_vm_ppgtt_addr_tlb_inval();
+
dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
MI_FLUSH_IMM_DW | flags;
- dw[i++] = lower_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc)) |
- MI_FLUSH_DW_USE_GTT;
- dw[i++] = upper_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc));
+ dw[i++] = lower_32_bits(addr);
+ dw[i++] = upper_32_bits(addr);
dw[i++] = MI_NOOP;
dw[i++] = MI_NOOP;
@@ -1101,11 +1126,11 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
- bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
+ bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
src_L0_ofs, dst_is_pltt,
src_L0, ccs_ofs, true);
- bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
+ bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
size -= src_L0;
}
@@ -1766,16 +1791,18 @@ static u32 pte_update_cmd_size(u64 size)
static void build_pt_update_batch_sram(struct xe_migrate *m,
struct xe_bb *bb, u32 pt_offset,
struct drm_pagemap_addr *sram_addr,
- u32 size)
+ u32 size, int level)
{
u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
+ u64 gpu_page_size = 0x1ull << xe_pt_shift(level);
u32 ptes;
int i = 0;
- ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ ptes = DIV_ROUND_UP(size, gpu_page_size);
while (ptes) {
u32 chunk = min(MAX_PTE_PER_SDI, ptes);
+ chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = pt_offset;
bb->cs[bb->len++] = 0;
@@ -1784,22 +1811,47 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
ptes -= chunk;
while (chunk--) {
- u64 addr = sram_addr[i].addr & PAGE_MASK;
+ u64 addr = sram_addr[i].addr & ~(gpu_page_size - 1);
+ u64 pte, orig_addr = addr;
xe_tile_assert(m->tile, sram_addr[i].proto ==
DRM_INTERCONNECT_SYSTEM);
xe_tile_assert(m->tile, addr);
- addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
- addr, pat_index,
- 0, false, 0);
- bb->cs[bb->len++] = lower_32_bits(addr);
- bb->cs[bb->len++] = upper_32_bits(addr);
- i++;
+again:
+ pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
+ addr, pat_index,
+ level, false, 0);
+ bb->cs[bb->len++] = lower_32_bits(pte);
+ bb->cs[bb->len++] = upper_32_bits(pte);
+
+ if (gpu_page_size < PAGE_SIZE) {
+ addr += XE_PAGE_SIZE;
+ if (orig_addr + PAGE_SIZE != addr) {
+ chunk--;
+ goto again;
+ }
+ i++;
+ } else {
+ i += gpu_page_size / PAGE_SIZE;
+ }
}
}
}
+static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
+ unsigned long size)
+{
+ u32 large_size = (0x1 << xe_pt_shift(1));
+ unsigned long i, incr = large_size / PAGE_SIZE;
+
+ for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr)
+ if (PAGE_SIZE << sram_addr[i].order != large_size)
+ return false;
+
+ return true;
+}
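A standalone sketch of the check performed by xe_migrate_vram_use_pde(): 2 MiB PDEs may be used only when every 2 MiB chunk of the source is backed by an allocation of that order. It assumes 4 KiB CPU pages and a 2 MiB level-1 GPU page; the struct and values are invented for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	#define CPU_PAGE_SIZE	4096UL
	#define LARGE_SIZE	(2UL * 1024 * 1024)

	struct addr_entry {
		unsigned int order;	/* allocation order, pages = 1 << order */
	};

	static bool use_pde(const struct addr_entry *addrs, unsigned long npages)
	{
		unsigned long incr = LARGE_SIZE / CPU_PAGE_SIZE;

		for (unsigned long i = 0; i < npages; i += incr)
			if ((CPU_PAGE_SIZE << addrs[i].order) != LARGE_SIZE)
				return false;

		return true;
	}

	int main(void)
	{
		/* one 2 MiB folio backing the whole chunk: order 9 */
		struct addr_entry chunk[512] = { [0] = { .order = 9 } };

		printf("use_pde: %d\n", use_pde(chunk, 512));
		return 0;
	}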
+
enum xe_migrate_copy_dir {
XE_MIGRATE_COPY_TO_VRAM,
XE_MIGRATE_COPY_TO_SRAM,
@@ -1829,6 +1881,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
PAGE_SIZE : 4;
int err;
unsigned long i, j;
+ bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset);
if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
(sram_offset | vram_addr) & XE_CACHELINE_MASK))
@@ -1853,7 +1906,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
* struct drm_pagemap_addr. Ensure this is the case even with higher
* orders.
*/
- for (i = 0; i < npages;) {
+ for (i = 0; !use_pde && i < npages;) {
unsigned int order = sram_addr[i].order;
for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
@@ -1863,16 +1916,26 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
i += NR_PAGES(order);
}
- build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
- sram_addr, len + sram_offset);
+ if (use_pde)
+ build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
+ sram_addr, len + sram_offset, 1);
+ else
+ build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
+ sram_addr, len + sram_offset, 0);
if (dir == XE_MIGRATE_COPY_TO_VRAM) {
- src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
+ if (use_pde)
+ src_L0_ofs = m->large_page_copy_ofs + sram_offset;
+ else
+ src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
} else {
src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
- dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
+ if (use_pde)
+ dst_L0_ofs = m->large_page_copy_ofs + sram_offset;
+ else
+ dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 0c737413fcb6..e8ec4114302e 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -576,6 +576,7 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
memset(info, 0, sizeof(struct xe_mocs_info));
switch (xe->info.platform) {
+ case XE_NOVALAKE_S:
case XE_PANTHERLAKE:
case XE_LUNARLAKE:
case XE_BATTLEMAGE:
@@ -772,12 +773,20 @@ void xe_mocs_init(struct xe_gt *gt)
init_l3cc_table(gt, &table);
}
-void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_mocs_dump() - Dump MOCS table.
+ * @gt: the &xe_gt with MOCS table
+ * @p: the &drm_printer to dump info to
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
enum xe_force_wake_domains domain;
struct xe_mocs_info table;
unsigned int fw_ref, flags;
+ int err = 0;
flags = get_mocs_settings(xe, &table);
@@ -785,14 +794,17 @@ void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
xe_pm_runtime_get_noresume(xe);
fw_ref = xe_force_wake_get(gt_to_fw(gt), domain);
- if (!xe_force_wake_ref_has_domain(fw_ref, domain))
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain)) {
+ err = -ETIMEDOUT;
goto err_fw;
+ }
table.ops->dump(&table, flags, gt, p);
err_fw:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
+ return err;
}
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
diff --git a/drivers/gpu/drm/xe/xe_mocs.h b/drivers/gpu/drm/xe/xe_mocs.h
index dc972ffd4d07..f00bbb269829 100644
--- a/drivers/gpu/drm/xe/xe_mocs.h
+++ b/drivers/gpu/drm/xe/xe_mocs.h
@@ -11,12 +11,6 @@ struct xe_gt;
void xe_mocs_init_early(struct xe_gt *gt);
void xe_mocs_init(struct xe_gt *gt);
-
-/**
- * xe_mocs_dump - Dump mocs table
- * @gt: GT structure
- * @p: Printer to dump info to
- */
-void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index a4894eb0d7f3..f901ba52b403 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -837,7 +837,8 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
xe_oa_configure_oa_context(stream, false);
/* Make sure we disable noa to save power. */
- xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0);
+ if (GT_VER(stream->gt) < 35)
+ xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0);
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 2e7cb99ae87a..7649b554942a 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -57,7 +57,7 @@ struct xe_pat_ops {
int n_entries);
void (*program_media)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries);
- void (*dump)(struct xe_gt *gt, struct drm_printer *p);
+ int (*dump)(struct xe_gt *gt, struct drm_printer *p);
};
static const struct xe_pat_table_entry xelp_pat_table[] = {
@@ -154,6 +154,41 @@ static const struct xe_pat_table_entry xe2_pat_table[] = {
static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 );
static const struct xe_pat_table_entry xe2_pat_pta = XE2_PAT( 0, 0, 0, 0, 3, 0 );
+/*
+ * Xe3p_XPC PAT table uses the same layout as Xe2/Xe3, except that there's no
+ * option for compression. Also note that the "L3" and "L4" register fields
+ * actually control L2 and L3 cache respectively on this platform.
+ */
+#define XE3P_XPC_PAT(no_promote, l3clos, l3_policy, l4_policy, __coh_mode) \
+ XE2_PAT(no_promote, 0, l3clos, l3_policy, l4_policy, __coh_mode)
+
+static const struct xe_pat_table_entry xe3p_xpc_pat_ats = XE3P_XPC_PAT( 0, 0, 0, 0, 3 );
+static const struct xe_pat_table_entry xe3p_xpc_pat_pta = XE3P_XPC_PAT( 0, 0, 0, 0, 0 );
+
+static const struct xe_pat_table_entry xe3p_xpc_pat_table[] = {
+ [ 0] = XE3P_XPC_PAT( 0, 0, 0, 0, 0 ),
+ [ 1] = XE3P_XPC_PAT( 0, 0, 0, 0, 2 ),
+ [ 2] = XE3P_XPC_PAT( 0, 0, 0, 0, 3 ),
+ [ 3] = XE3P_XPC_PAT( 0, 0, 3, 3, 0 ),
+ [ 4] = XE3P_XPC_PAT( 0, 0, 3, 3, 2 ),
+ [ 5] = XE3P_XPC_PAT( 0, 0, 3, 0, 0 ),
+ [ 6] = XE3P_XPC_PAT( 0, 0, 3, 0, 2 ),
+ [ 7] = XE3P_XPC_PAT( 0, 0, 3, 0, 3 ),
+ [ 8] = XE3P_XPC_PAT( 0, 0, 0, 3, 0 ),
+ [ 9] = XE3P_XPC_PAT( 0, 0, 0, 3, 2 ),
+ [10] = XE3P_XPC_PAT( 0, 0, 0, 3, 3 ),
+ /* 11..22 are reserved; leave set to all 0's */
+ [23] = XE3P_XPC_PAT( 0, 1, 0, 0, 0 ),
+ [24] = XE3P_XPC_PAT( 0, 1, 0, 0, 2 ),
+ [25] = XE3P_XPC_PAT( 0, 1, 0, 0, 3 ),
+ [26] = XE3P_XPC_PAT( 0, 2, 0, 0, 0 ),
+ [27] = XE3P_XPC_PAT( 0, 2, 0, 0, 2 ),
+ [28] = XE3P_XPC_PAT( 0, 2, 0, 0, 3 ),
+ [29] = XE3P_XPC_PAT( 0, 3, 0, 0, 0 ),
+ [30] = XE3P_XPC_PAT( 0, 3, 0, 0, 2 ),
+ [31] = XE3P_XPC_PAT( 0, 3, 0, 0, 3 ),
+};
+
u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
{
WARN_ON(pat_index >= xe->pat.n_entries);
@@ -194,7 +229,7 @@ static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry ta
xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value);
}
-static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xelp_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -202,7 +237,7 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
drm_printf(p, "PAT table:\n");
@@ -215,6 +250,7 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
}
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
static const struct xe_pat_ops xelp_pat_ops = {
@@ -222,7 +258,7 @@ static const struct xe_pat_ops xelp_pat_ops = {
.dump = xelp_dump,
};
-static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xehp_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -230,7 +266,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
drm_printf(p, "PAT table:\n");
@@ -245,6 +281,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
}
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
static const struct xe_pat_ops xehp_pat_ops = {
@@ -252,7 +289,7 @@ static const struct xe_pat_ops xehp_pat_ops = {
.dump = xehp_dump,
};
-static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -260,7 +297,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
drm_printf(p, "PAT table:\n");
@@ -273,6 +310,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
}
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
static const struct xe_pat_ops xehpc_pat_ops = {
@@ -280,7 +318,7 @@ static const struct xe_pat_ops xehpc_pat_ops = {
.dump = xehpc_dump,
};
-static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -288,7 +326,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
drm_printf(p, "PAT table:\n");
@@ -306,6 +344,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
}
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
/*
@@ -318,7 +357,7 @@ static const struct xe_pat_ops xelpg_pat_ops = {
.dump = xelpg_dump,
};
-static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -327,7 +366,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
drm_printf(p, "PAT table:\n");
@@ -367,6 +406,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
pat);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
static const struct xe_pat_ops xe2_pat_ops = {
@@ -375,9 +415,68 @@ static const struct xe_pat_ops xe2_pat_ops = {
.dump = xe2_dump,
};
+static int xe3p_xpc_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref;
+ u32 pat;
+ int i;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
+
+ drm_printf(p, "PAT table:\n");
+
+ for (i = 0; i < xe->pat.n_entries; i++) {
+ pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
+
+ drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u ] (%#8x)\n", i,
+ !!(pat & XE2_NO_PROMOTE),
+ REG_FIELD_GET(XE2_L3_CLOS, pat),
+ REG_FIELD_GET(XE2_L3_POLICY, pat),
+ REG_FIELD_GET(XE2_L4_POLICY, pat),
+ REG_FIELD_GET(XE2_COH_MODE, pat),
+ pat);
+ }
+
+ /*
+ * Also print PTA_MODE, which describes how the hardware accesses
+ * PPGTT entries.
+ */
+ pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));
+
+ drm_printf(p, "Page Table Access:\n");
+ drm_printf(p, "PTA_MODE= [ %u, %u, %u, %u, %u ] (%#8x)\n",
+ !!(pat & XE2_NO_PROMOTE),
+ REG_FIELD_GET(XE2_L3_CLOS, pat),
+ REG_FIELD_GET(XE2_L3_POLICY, pat),
+ REG_FIELD_GET(XE2_L4_POLICY, pat),
+ REG_FIELD_GET(XE2_COH_MODE, pat),
+ pat);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
+}
+
+static const struct xe_pat_ops xe3p_xpc_pat_ops = {
+ .program_graphics = program_pat_mcr,
+ .program_media = program_pat,
+ .dump = xe3p_xpc_dump,
+};
+
void xe_pat_init_early(struct xe_device *xe)
{
- if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
+ if (GRAPHICS_VERx100(xe) == 3511) {
+ xe->pat.ops = &xe3p_xpc_pat_ops;
+ xe->pat.table = xe3p_xpc_pat_table;
+ xe->pat.pat_ats = &xe3p_xpc_pat_ats;
+ xe->pat.pat_pta = &xe3p_xpc_pat_pta;
+ xe->pat.n_entries = ARRAY_SIZE(xe3p_xpc_pat_table);
+ xe->pat.idx[XE_CACHE_NONE] = 3;
+ xe->pat.idx[XE_CACHE_WT] = 3; /* N/A (no display); use UC */
+ xe->pat.idx[XE_CACHE_WB] = 2;
+ } else if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
xe->pat.ops = &xe2_pat_ops;
xe->pat.table = xe2_pat_table;
xe->pat.pat_ats = &xe2_pat_ats;
@@ -462,12 +561,19 @@ void xe_pat_init(struct xe_gt *gt)
xe->pat.ops->program_graphics(gt, xe->pat.table, xe->pat.n_entries);
}
-void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_pat_dump() - Dump GT PAT table into a drm printer.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
if (!xe->pat.ops)
- return;
+ return -EOPNOTSUPP;
- xe->pat.ops->dump(gt, p);
+ return xe->pat.ops->dump(gt, p);
}
diff --git a/drivers/gpu/drm/xe/xe_pat.h b/drivers/gpu/drm/xe/xe_pat.h
index fa0dfbe525cd..268c9a899f56 100644
--- a/drivers/gpu/drm/xe/xe_pat.h
+++ b/drivers/gpu/drm/xe/xe_pat.h
@@ -43,12 +43,7 @@ void xe_pat_init_early(struct xe_device *xe);
*/
void xe_pat_init(struct xe_gt *gt);
-/**
- * xe_pat_dump - Dump PAT table
- * @gt: GT structure
- * @p: Printer to dump info to
- */
-void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_pat_dump(struct xe_gt *gt, struct drm_printer *p);
/**
* xe_pat_index_get_coh_mode - Extract the coherency mode for the given
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 9a6df79fc5b6..c326430e75b5 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -30,6 +30,7 @@
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
+#include "xe_printk.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
@@ -51,15 +52,10 @@ __diag_ignore_all("-Woverride-init", "Allow field overrides in table");
static const struct xe_graphics_desc graphics_xelp = {
.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
-
- .va_bits = 48,
- .vm_max_level = 3,
};
#define XE_HP_FEATURES \
- .has_range_tlb_inval = true, \
- .va_bits = 48, \
- .vm_max_level = 3
+ .has_range_tlb_inval = true
static const struct xe_graphics_desc graphics_xehpg = {
.hw_engine_mask =
@@ -68,9 +64,6 @@ static const struct xe_graphics_desc graphics_xehpg = {
BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
XE_HP_FEATURES,
- .vram_flags = XE_VRAM_FLAGS_NEED64K,
-
- .has_flat_ccs = 1,
};
static const struct xe_graphics_desc graphics_xehpc = {
@@ -84,9 +77,6 @@ static const struct xe_graphics_desc graphics_xehpc = {
BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
XE_HP_FEATURES,
- .va_bits = 57,
- .vm_max_level = 4,
- .vram_flags = XE_VRAM_FLAGS_NEED64K,
.has_asid = 1,
.has_atomic_enable_pte_bit = 1,
@@ -104,12 +94,9 @@ static const struct xe_graphics_desc graphics_xelpg = {
#define XE2_GFX_FEATURES \
.has_asid = 1, \
.has_atomic_enable_pte_bit = 1, \
- .has_flat_ccs = 1, \
.has_range_tlb_inval = 1, \
.has_usm = 1, \
.has_64bit_timestamp = 1, \
- .va_bits = 48, \
- .vm_max_level = 4, \
.hw_engine_mask = \
BIT(XE_HW_ENGINE_RCS0) | \
BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
@@ -119,6 +106,13 @@ static const struct xe_graphics_desc graphics_xe2 = {
XE2_GFX_FEATURES,
};
+static const struct xe_graphics_desc graphics_xe3p_xpc = {
+ XE2_GFX_FEATURES,
+ .hw_engine_mask =
+ GENMASK(XE_HW_ENGINE_BCS8, XE_HW_ENGINE_BCS1) |
+ GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0),
+};
+
static const struct xe_media_desc media_xem = {
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
@@ -149,6 +143,9 @@ static const struct xe_ip graphics_ips[] = {
{ 3000, "Xe3_LPG", &graphics_xe2 },
{ 3001, "Xe3_LPG", &graphics_xe2 },
{ 3003, "Xe3_LPG", &graphics_xe2 },
+ { 3004, "Xe3_LPG", &graphics_xe2 },
+ { 3005, "Xe3_LPG", &graphics_xe2 },
+ { 3511, "Xe3p_XPC", &graphics_xe3p_xpc },
};
/* Pre-GMDID Media IPs */
@@ -162,6 +159,8 @@ static const struct xe_ip media_ips[] = {
{ 2000, "Xe2_LPM", &media_xelpmp },
{ 3000, "Xe3_LPM", &media_xelpmp },
{ 3002, "Xe3_LPM", &media_xelpmp },
+ { 3500, "Xe3p_LPM", &media_xelpmp },
+ { 3503, "Xe3p_HPM", &media_xelpmp },
};
static const struct xe_device_desc tgl_desc = {
@@ -174,6 +173,8 @@ static const struct xe_device_desc tgl_desc = {
.has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const struct xe_device_desc rkl_desc = {
@@ -185,6 +186,8 @@ static const struct xe_device_desc rkl_desc = {
.has_llc = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
@@ -203,6 +206,8 @@ static const struct xe_device_desc adl_s_desc = {
{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
{},
},
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
@@ -221,6 +226,8 @@ static const struct xe_device_desc adl_p_desc = {
{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
{},
},
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const struct xe_device_desc adl_n_desc = {
@@ -233,6 +240,8 @@ static const struct xe_device_desc adl_n_desc = {
.has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 3,
};
#define DGFX_FEATURES \
@@ -249,6 +258,8 @@ static const struct xe_device_desc dg1_desc = {
.has_heci_gscfi = 1,
.max_gt_per_tile = 1,
.require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
@@ -258,6 +269,7 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
#define DG2_FEATURES \
DGFX_FEATURES, \
PLATFORM(DG2), \
+ .has_flat_ccs = 1, \
.has_gsc_nvm = 1, \
.has_heci_gscfi = 1, \
.subplatforms = (const struct xe_subplatform_desc[]) { \
@@ -265,7 +277,10 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
{ } \
- }
+ }, \
+ .va_bits = 48, \
+ .vm_max_level = 3, \
+ .vram_flags = XE_VRAM_FLAGS_NEED64K
static const struct xe_device_desc ats_m_desc = {
.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
@@ -303,6 +318,9 @@ static const __maybe_unused struct xe_device_desc pvc_desc = {
.max_gt_per_tile = 1,
.max_remote_tiles = 1,
.require_force_probe = true,
+ .va_bits = 57,
+ .vm_max_level = 4,
+ .vram_flags = XE_VRAM_FLAGS_NEED64K,
.has_mbx_power_limits = false,
};
@@ -314,23 +332,31 @@ static const struct xe_device_desc mtl_desc = {
.has_display = true,
.has_pxp = true,
.max_gt_per_tile = 2,
+ .va_bits = 48,
+ .vm_max_level = 4,
};
static const struct xe_device_desc lnl_desc = {
PLATFORM(LUNARLAKE),
.dma_mask_size = 46,
.has_display = true,
+ .has_flat_ccs = 1,
.has_pxp = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
+ .va_bits = 48,
+ .vm_max_level = 4,
};
+static const u16 bmg_g21_ids[] = { INTEL_BMG_G21_IDS(NOP), 0 };
+
static const struct xe_device_desc bmg_desc = {
DGFX_FEATURES,
PLATFORM(BATTLEMAGE),
.dma_mask_size = 46,
.has_display = true,
.has_fan_control = true,
+ .has_flat_ccs = 1,
.has_mbx_power_limits = true,
.has_gsc_nvm = 1,
.has_heci_cscfi = 1,
@@ -338,15 +364,36 @@ static const struct xe_device_desc bmg_desc = {
.has_sriov = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
+ .subplatforms = (const struct xe_subplatform_desc[]) {
+ { XE_SUBPLATFORM_BATTLEMAGE_G21, "G21", bmg_g21_ids },
+ { }
+ },
+ .va_bits = 48,
+ .vm_max_level = 4,
};
static const struct xe_device_desc ptl_desc = {
PLATFORM(PANTHERLAKE),
.dma_mask_size = 46,
.has_display = true,
+ .has_flat_ccs = 1,
.has_sriov = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
+ .needs_shared_vf_gt_wq = true,
+ .va_bits = 48,
+ .vm_max_level = 4,
+};
+
+static const struct xe_device_desc nvls_desc = {
+ PLATFORM(NOVALAKE_S),
+ .dma_mask_size = 46,
+ .has_display = true,
+ .has_flat_ccs = 1,
+ .max_gt_per_tile = 2,
+ .require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 4,
};
#undef PLATFORM
@@ -375,6 +422,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
+ INTEL_NVLS_IDS(INTEL_VGA_DEVICE, &nvls_desc),
{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -447,7 +495,7 @@ enum xe_gmdid_type {
GMDID_MEDIA
};
-static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
+static int read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct xe_reg gmdid_reg = GMD_ID;
@@ -456,22 +504,24 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);
if (IS_SRIOV_VF(xe)) {
- struct xe_gt *gt = xe_root_mmio_gt(xe);
-
/*
* To get the value of the GMDID register, VFs must obtain it
* from the GuC using MMIO communication.
*
- * Note that at this point the xe_gt is not fully uninitialized
- * and only basic access to MMIO registers is possible. To use
- * our existing GuC communication functions we must perform at
- * least basic xe_gt and xe_guc initialization.
- *
- * Since to obtain the value of GMDID_MEDIA we need to use the
- * media GuC, temporarily tweak the gt type.
+ * Note that at this point the GTs are not initialized and only
+ * tile-level access to MMIO registers is possible. To use our
+ * existing GuC communication functions we must create a dummy
+ * GT structure and perform at least basic xe_gt and xe_guc
+ * initialization.
*/
- xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);
+ struct xe_gt *gt __free(kfree) = NULL;
+ int err;
+ gt = kzalloc(sizeof(*gt), GFP_KERNEL);
+ if (!gt)
+ return -ENOMEM;
+
+ gt->tile = &xe->tiles[0];
if (type == GMDID_MEDIA) {
gt->info.id = 1;
gt->info.type = XE_GT_TYPE_MEDIA;
@@ -483,15 +533,11 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
xe_gt_mmio_init(gt);
xe_guc_comm_init_early(&gt->uc.guc);
- /* Don't bother with GMDID if failed to negotiate the GuC ABI */
- val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);
+ err = xe_gt_sriov_vf_bootstrap(gt);
+ if (err)
+ return err;
- /*
- * Only undo xe_gt.info here, the remaining changes made above
- * will be overwritten as part of the regular initialization.
- */
- gt->info.id = 0;
- gt->info.type = XE_GT_TYPE_UNINITIALIZED;
+ val = xe_gt_sriov_vf_gmdid(gt);
} else {
/*
* GMD_ID is a GT register, but at this point in the driver
@@ -509,6 +555,8 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
*revid = REG_FIELD_GET(GMD_ID_REVID, val);
+
+ return 0;
}
static const struct xe_ip *find_graphics_ip(unsigned int verx100)
@@ -535,18 +583,21 @@ static const struct xe_ip *find_media_ip(unsigned int verx100)
* Read IP version from hardware and select graphics/media IP descriptors
* based on the result.
*/
-static void handle_gmdid(struct xe_device *xe,
- const struct xe_ip **graphics_ip,
- const struct xe_ip **media_ip,
- u32 *graphics_revid,
- u32 *media_revid)
+static int handle_gmdid(struct xe_device *xe,
+ const struct xe_ip **graphics_ip,
+ const struct xe_ip **media_ip,
+ u32 *graphics_revid,
+ u32 *media_revid)
{
u32 ver;
+ int ret;
*graphics_ip = NULL;
*media_ip = NULL;
- read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
+ ret = read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
+ if (ret)
+ return ret;
*graphics_ip = find_graphics_ip(ver);
if (!*graphics_ip) {
@@ -554,16 +605,21 @@ static void handle_gmdid(struct xe_device *xe,
ver / 100, ver % 100);
}
- read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
+ ret = read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
+ if (ret)
+ return ret;
+
/* Media may legitimately be fused off / not present */
if (ver == 0)
- return;
+ return 0;
*media_ip = find_media_ip(ver);
if (!*media_ip) {
drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
ver / 100, ver % 100);
}
+
+ return 0;
}
/*
@@ -582,8 +638,14 @@ static int xe_info_init_early(struct xe_device *xe,
subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;
xe->info.dma_mask_size = desc->dma_mask_size;
+ xe->info.va_bits = desc->va_bits;
+ xe->info.vm_max_level = desc->vm_max_level;
+ xe->info.vram_flags = desc->vram_flags;
+
xe->info.is_dgfx = desc->is_dgfx;
xe->info.has_fan_control = desc->has_fan_control;
+ /* runtime fusing may force flat_ccs to be disabled later */
+ xe->info.has_flat_ccs = desc->has_flat_ccs;
xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
xe->info.has_gsc_nvm = desc->has_gsc_nvm;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
@@ -591,11 +653,13 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.has_late_bind = desc->has_late_bind;
xe->info.has_llc = desc->has_llc;
xe->info.has_pxp = desc->has_pxp;
- xe->info.has_sriov = desc->has_sriov;
+ xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) &&
+ desc->has_sriov;
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
xe->info.needs_scratch = desc->needs_scratch;
+ xe->info.needs_shared_vf_gt_wq = desc->needs_shared_vf_gt_wq;
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
@@ -651,6 +715,63 @@ static void xe_info_probe_tile_count(struct xe_device *xe)
}
}
+static struct xe_gt *alloc_primary_gt(struct xe_tile *tile,
+ const struct xe_graphics_desc *graphics_desc,
+ const struct xe_media_desc *media_desc)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_gt *gt;
+
+ if (!xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev))) {
+ xe_info(xe, "Primary GT disabled via configfs\n");
+ return NULL;
+ }
+
+ gt = xe_gt_alloc(tile);
+ if (IS_ERR(gt))
+ return gt;
+
+ gt->info.type = XE_GT_TYPE_MAIN;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile;
+ gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
+ gt->info.engine_mask = graphics_desc->hw_engine_mask;
+
+ /*
+ * Before media version 13, the media IP was part of the primary GT,
+ * so we need to add the media engines to the primary GT's engine list.
+ */
+ if (MEDIA_VER(xe) < 13 && media_desc)
+ gt->info.engine_mask |= media_desc->hw_engine_mask;
+
+ return gt;
+}
+
+static struct xe_gt *alloc_media_gt(struct xe_tile *tile,
+ const struct xe_media_desc *media_desc)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_gt *gt;
+
+ if (!xe_configfs_media_gt_allowed(to_pci_dev(xe->drm.dev))) {
+ xe_info(xe, "Media GT disabled via configfs\n");
+ return NULL;
+ }
+
+ if (MEDIA_VER(xe) < 13 || !media_desc)
+ return NULL;
+
+ gt = xe_gt_alloc(tile);
+ if (IS_ERR(gt))
+ return gt;
+
+ gt->info.type = XE_GT_TYPE_MEDIA;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
+ gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
+ gt->info.engine_mask = media_desc->hw_engine_mask;
+
+ return gt;
+}
+
/*
* Initialize device info content that does require knowledge about
* graphics / media IP version.
@@ -667,6 +788,7 @@ static int xe_info_init(struct xe_device *xe,
const struct xe_media_desc *media_desc;
struct xe_tile *tile;
struct xe_gt *gt;
+ int ret;
u8 id;
/*
@@ -682,8 +804,11 @@ static int xe_info_init(struct xe_device *xe,
xe->info.step = xe_step_pre_gmdid_get(xe);
} else {
xe_assert(xe, !desc->pre_gmdid_media_ip);
- handle_gmdid(xe, &graphics_ip, &media_ip,
- &graphics_gmdid_revid, &media_gmdid_revid);
+ ret = handle_gmdid(xe, &graphics_ip, &media_ip,
+ &graphics_gmdid_revid, &media_gmdid_revid);
+ if (ret)
+ return ret;
+
xe->info.step = xe_step_gmdid_get(xe,
graphics_gmdid_revid,
media_gmdid_revid);
@@ -710,17 +835,11 @@ static int xe_info_init(struct xe_device *xe,
media_desc = NULL;
}
- xe->info.vram_flags = graphics_desc->vram_flags;
- xe->info.va_bits = graphics_desc->va_bits;
- xe->info.vm_max_level = graphics_desc->vm_max_level;
xe->info.has_asid = graphics_desc->has_asid;
xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
if (xe->info.platform != XE_PVC)
xe->info.has_device_atomics_on_smem = 1;
- /* Runtime detection may change this later */
- xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
-
xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval;
xe->info.has_usm = graphics_desc->has_usm;
xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
@@ -735,44 +854,33 @@ static int xe_info_init(struct xe_device *xe,
return err;
}
- /*
- * All platforms have at least one primary GT. Any platform with media
- * version 13 or higher has an additional dedicated media GT. And
- * depending on the graphics IP there may be additional "remote tiles."
- * All of these together determine the overall GT count.
- */
+ /* Allocate any GT and VRAM structures necessary for the platform. */
for_each_tile(tile, xe, id) {
int err;
- gt = tile->primary_gt;
- gt->info.type = XE_GT_TYPE_MAIN;
- gt->info.id = tile->id * xe->info.max_gt_per_tile;
- gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
- gt->info.engine_mask = graphics_desc->hw_engine_mask;
-
err = xe_tile_alloc_vram(tile);
if (err)
return err;
- if (MEDIA_VER(xe) < 13 && media_desc)
- gt->info.engine_mask |= media_desc->hw_engine_mask;
-
- if (MEDIA_VER(xe) < 13 || !media_desc)
- continue;
+ tile->primary_gt = alloc_primary_gt(tile, graphics_desc, media_desc);
+ if (IS_ERR(tile->primary_gt))
+ return PTR_ERR(tile->primary_gt);
/*
- * Allocate and setup media GT for platforms with standalone
- * media.
+ * It's not currently possible to probe a device with the
+ * primary GT disabled. With some work, this may become possible
+ * in the future for igpu platforms (although probably not for
+ * dgpus, since access to the primary GT's BCS engines is
+ * required for VRAM management).
*/
- tile->media_gt = xe_gt_alloc(tile);
+ if (!tile->primary_gt) {
+ drm_err(&xe->drm, "Cannot probe device without a primary GT\n");
+ return -ENODEV;
+ }
+
+ tile->media_gt = alloc_media_gt(tile, media_desc);
if (IS_ERR(tile->media_gt))
return PTR_ERR(tile->media_gt);
-
- gt = tile->media_gt;
- gt->info.type = XE_GT_TYPE_MEDIA;
- gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
- gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
- gt->info.engine_mask = media_desc->hw_engine_mask;
}
/*
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index af05db07162e..735f51effc7a 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -17,56 +17,17 @@
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_control.h"
#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_provision.h"
#include "xe_sriov_printk.h"
-static int pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
-{
- unsigned int n;
-
- for (n = 1; n <= num_vfs; n++)
- if (!xe_gt_sriov_pf_config_is_empty(gt, n))
- return false;
-
- return true;
-}
-
-static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
-{
- struct xe_gt *gt;
- unsigned int id;
- int result = 0, err;
-
- for_each_gt(gt, xe, id) {
- if (!pf_needs_provisioning(gt, num_vfs))
- continue;
- err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
- result = result ?: err;
- }
-
- return result;
-}
-
-static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
-{
- struct xe_gt *gt;
- unsigned int id;
- unsigned int n;
-
- for_each_gt(gt, xe, id)
- for (n = 1; n <= num_vfs; n++)
- xe_gt_sriov_pf_config_release(gt, n, true);
-}
-
static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
{
- struct xe_gt *gt;
- unsigned int id;
unsigned int n;
- for_each_gt(gt, xe, id)
- for (n = 1; n <= num_vfs; n++)
- xe_gt_sriov_pf_control_trigger_flr(gt, n);
+ for (n = 1; n <= num_vfs; n++)
+ xe_sriov_pf_control_reset_vf(xe, n);
}
static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
@@ -170,7 +131,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
*/
xe_pm_runtime_get_noresume(xe);
- err = pf_provision_vfs(xe, num_vfs);
+ err = xe_sriov_pf_provision_vfs(xe, num_vfs);
if (err < 0)
goto failed;
@@ -194,7 +155,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
return num_vfs;
failed:
- pf_unprovision_vfs(xe, num_vfs);
+ xe_sriov_pf_unprovision_vfs(xe, num_vfs);
xe_pm_runtime_put(xe);
out:
xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
@@ -220,7 +181,7 @@ static int pf_disable_vfs(struct xe_device *xe)
pf_reset_vfs(xe, num_vfs);
- pf_unprovision_vfs(xe, num_vfs);
+ xe_sriov_pf_unprovision_vfs(xe, num_vfs);
/* not needed anymore - see pf_enable_vfs() */
xe_pm_runtime_put(xe);
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index 9b9766a3baa3..a4451bdc79fb 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -30,12 +30,16 @@ struct xe_device_desc {
u8 dma_mask_size;
u8 max_remote_tiles:2;
u8 max_gt_per_tile:2;
+ u8 va_bits;
+ u8 vm_max_level;
+ u8 vram_flags;
u8 require_force_probe:1;
u8 is_dgfx:1;
u8 has_display:1;
u8 has_fan_control:1;
+ u8 has_flat_ccs:1;
u8 has_gsc_nvm:1;
u8 has_heci_gscfi:1;
u8 has_heci_cscfi:1;
@@ -48,18 +52,14 @@ struct xe_device_desc {
u8 skip_guc_pc:1;
u8 skip_mtcfg:1;
u8 skip_pcode:1;
+ u8 needs_shared_vf_gt_wq:1;
};
struct xe_graphics_desc {
- u8 va_bits;
- u8 vm_max_level;
- u8 vram_flags;
-
u64 hw_engine_mask; /* hardware engines provided by graphics IP */
u8 has_asid:1;
u8 has_atomic_enable_pte_bit:1;
- u8 has_flat_ccs:1;
u8 has_indirect_ring_state:1;
u8 has_range_tlb_inval:1;
u8 has_usm:1;
diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h
index d08574c4cdb8..78286285c249 100644
--- a/drivers/gpu/drm/xe/xe_platform_types.h
+++ b/drivers/gpu/drm/xe/xe_platform_types.h
@@ -24,6 +24,7 @@ enum xe_platform {
XE_LUNARLAKE,
XE_BATTLEMAGE,
XE_PANTHERLAKE,
+ XE_NOVALAKE_S,
};
enum xe_subplatform {
@@ -34,6 +35,7 @@ enum xe_subplatform {
XE_SUBPLATFORM_DG2_G10,
XE_SUBPLATFORM_DG2_G11,
XE_SUBPLATFORM_DG2_G12,
+ XE_SUBPLATFORM_BATTLEMAGE_G21,
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 2c5a44377994..53507e09f7bc 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -83,8 +83,58 @@ static struct lockdep_map xe_pm_runtime_d3cold_map = {
static struct lockdep_map xe_pm_runtime_nod3cold_map = {
.name = "xe_rpm_nod3cold_map"
};
+
+static struct lockdep_map xe_pm_block_lockdep_map = {
+ .name = "xe_pm_block_map",
+};
#endif
+static void xe_pm_block_begin_signalling(void)
+{
+ lock_acquire_shared_recursive(&xe_pm_block_lockdep_map, 0, 1, NULL, _RET_IP_);
+}
+
+static void xe_pm_block_end_signalling(void)
+{
+ lock_release(&xe_pm_block_lockdep_map, _RET_IP_);
+}
+
+/**
+ * xe_pm_might_block_on_suspend() - Annotate that the code might block on suspend
+ *
+ * Annotation to use where the code might block or cease to make
+ * progress pending resume completion.
+ */
+void xe_pm_might_block_on_suspend(void)
+{
+ lock_map_acquire(&xe_pm_block_lockdep_map);
+ lock_map_release(&xe_pm_block_lockdep_map);
+}
+
+/**
+ * xe_pm_block_on_suspend() - Block pending suspend.
+ * @xe: The xe device about to be suspended.
+ *
+ * Block if the pm notifier has started evicting bos, to avoid
+ * racing with it and validating those bos back in. The function
+ * is annotated to ensure no locks are held that are also grabbed
+ * in the pm notifier or in device suspend / resume.
+ * This is intended to be used by freezable tasks only (not
+ * freezable workqueues), so that the function returns
+ * %-ERESTARTSYS when tasks are frozen during suspend, allowing
+ * the task to freeze. The caller must be able to handle the
+ * %-ERESTARTSYS.
+ *
+ * Return: %0 on success, %-ERESTARTSYS on a pending signal or
+ * if freezing is requested.
+ */
+int xe_pm_block_on_suspend(struct xe_device *xe)
+{
+ xe_pm_might_block_on_suspend();
+
+ return wait_for_completion_interruptible(&xe->pm_block);
+}
+
/**
* xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
* @xe: The xe device.
@@ -124,6 +174,7 @@ int xe_pm_suspend(struct xe_device *xe)
int err;
drm_dbg(&xe->drm, "Suspending device\n");
+ xe_pm_block_begin_signalling();
trace_xe_pm_suspend(xe, __builtin_return_address(0));
err = xe_pxp_pm_suspend(xe->pxp);
@@ -155,6 +206,8 @@ int xe_pm_suspend(struct xe_device *xe)
xe_i2c_pm_suspend(xe);
drm_dbg(&xe->drm, "Device suspended\n");
+ xe_pm_block_end_signalling();
+
return 0;
err_display:
@@ -162,6 +215,7 @@ err_display:
xe_pxp_pm_resume(xe->pxp);
err:
drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
+ xe_pm_block_end_signalling();
return err;
}
@@ -178,6 +232,7 @@ int xe_pm_resume(struct xe_device *xe)
u8 id;
int err;
+ xe_pm_block_begin_signalling();
drm_dbg(&xe->drm, "Resuming device\n");
trace_xe_pm_resume(xe, __builtin_return_address(0));
@@ -222,9 +277,11 @@ int xe_pm_resume(struct xe_device *xe)
xe_late_bind_fw_load(&xe->late_bind);
drm_dbg(&xe->drm, "Device resumed\n");
+ xe_pm_block_end_signalling();
return 0;
err:
drm_dbg(&xe->drm, "Device resume failed %d\n", err);
+ xe_pm_block_end_signalling();
return err;
}
@@ -329,9 +386,16 @@ static int xe_pm_notifier_callback(struct notifier_block *nb,
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ {
+ struct xe_validation_ctx ctx;
+
reinit_completion(&xe->pm_block);
+ xe_pm_block_begin_signalling();
xe_pm_runtime_get(xe);
+ (void)xe_validation_ctx_init(&ctx, &xe->val, NULL,
+ (struct xe_val_flags) {.exclusive = true});
err = xe_bo_evict_all_user(xe);
+ xe_validation_ctx_fini(&ctx);
if (err)
drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
@@ -343,7 +407,9 @@ static int xe_pm_notifier_callback(struct notifier_block *nb,
* avoid a runtime suspend interfering with evicted objects or backup
* allocations.
*/
+ xe_pm_block_end_signalling();
break;
+ }
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
complete_all(&xe->pm_block);
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 59678b310e55..f7f89a18b6fc 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -33,6 +33,8 @@ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
bool xe_rpm_reclaim_safe(const struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
+int xe_pm_block_on_suspend(struct xe_device *xe);
+void xe_pm_might_block_on_suspend(void);
int xe_pm_module_init(void);
#endif
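
Not part of the patch: a minimal caller sketch of the two new entry points declared above, assuming a hypothetical freezable ioctl helper (xe_foo_wait_for_resume() is invented for illustration) that must not validate BOs while the PM notifier is evicting them.

	/* Hypothetical caller sketch (not from this series): a freezable
	 * task waiting out a pending suspend before validating BOs.
	 */
	static int xe_foo_wait_for_resume(struct xe_device *xe)
	{
		int err;

		/* Returns -ERESTARTSYS on a pending signal or when freezing
		 * is requested; the caller is expected to restart after resume.
		 */
		err = xe_pm_block_on_suspend(xe);
		if (err)
			return err;

		/* ...safe to validate BOs, suspend eviction is not in flight... */
		return 0;
	}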
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
index cab51d826345..c63335eb69e5 100644
--- a/drivers/gpu/drm/xe/xe_pmu.c
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -497,7 +497,12 @@ static const struct attribute_group *pmu_events_attr_update[] = {
static void set_supported_events(struct xe_pmu *pmu)
{
struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
- struct xe_gt *gt = xe_device_get_gt(xe, 0);
+ struct xe_gt *gt;
+ int id;
+
+ /* If there are no GTs, don't support any GT-related events */
+ if (xe->info.gt_count == 0)
+ return;
if (!xe->info.skip_guc_pc) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY);
@@ -505,6 +510,10 @@ static void set_supported_events(struct xe_pmu *pmu)
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_REQUESTED_FREQUENCY);
}
+ /* Find the first available GT to query engine event capabilities */
+ for_each_gt(gt, xe, id)
+ break;
+
if (xe_guc_engine_activity_supported(&gt->uc.guc)) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 83fbeea5aa20..7f587ca3947d 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -8,6 +8,8 @@
#include <linux/slab.h>
#include "xe_exec_queue.h"
+#include "xe_gt_printk.h"
+#include "xe_guc_exec_queue_types.h"
#include "xe_vm.h"
static void preempt_fence_work_func(struct work_struct *w)
@@ -22,6 +24,15 @@ static void preempt_fence_work_func(struct work_struct *w)
} else if (!q->ops->reset_status(q)) {
int err = q->ops->suspend_wait(q);
+ if (err == -EAGAIN) {
+ xe_gt_dbg(q->gt, "PREEMPT FENCE RETRY guc_id=%d",
+ q->guc->id);
+ queue_work(q->vm->xe->preempt_fence_wq,
+ &pfence->preempt_work);
+ dma_fence_end_signalling(cookie);
+ return;
+ }
+
if (err)
dma_fence_set_error(&pfence->base, err);
} else {
diff --git a/drivers/gpu/drm/xe/xe_psmi.c b/drivers/gpu/drm/xe/xe_psmi.c
index 45d142191d60..6a54e38b81ba 100644
--- a/drivers/gpu/drm/xe/xe_psmi.c
+++ b/drivers/gpu/drm/xe/xe_psmi.c
@@ -70,8 +70,8 @@ static struct xe_bo *psmi_alloc_object(struct xe_device *xe,
{
struct xe_tile *tile;
- if (!id || !bo_size)
- return NULL;
+ xe_assert(xe, id);
+ xe_assert(xe, bo_size);
tile = &xe->tiles[id - 1];
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 07f96bda638a..d22fd1ccc0ba 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -122,7 +122,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
if (vm->xef) /* userspace */
- bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE | XE_BO_FLAG_FORCE_USER_VRAM;
pt->level = level;
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 2e9ff33ed2fe..1c0915e2cc16 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -436,7 +436,7 @@ static int query_hwconfig(struct xe_device *xe,
struct drm_xe_device_query *query)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
- size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
+ size_t size = gt ? xe_guc_hwconfig_size(&gt->uc.guc) : 0;
void __user *query_ptr = u64_to_user_ptr(query->data);
void *hwconfig;
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index 23f6c81d9994..690bc327a363 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -19,7 +19,8 @@
#undef XE_REG_MCR
#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
-static bool match_not_render(const struct xe_gt *gt,
+static bool match_not_render(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
return hwe->class != XE_ENGINE_CLASS_RENDER;
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index d71837773d6c..ac0c6dcffe15 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -245,12 +245,14 @@ static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
/* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+ u64 batch_addr, u32 *head, u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
struct xe_gt *gt = job->q->gt;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
if (job->ring_ops_flush_tlb) {
@@ -296,7 +298,7 @@ static bool has_aux_ccs(struct xe_device *xe)
}
static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+ u64 batch_addr, u32 *head, u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
@@ -304,6 +306,8 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
struct xe_device *xe = gt_to_xe(gt);
bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
dw[i++] = preparser_disable(true);
@@ -346,7 +350,8 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+ u64 batch_addr, u32 *head,
+ u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
@@ -355,6 +360,8 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 mask_flags = 0;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
dw[i++] = preparser_disable(true);
@@ -396,11 +403,14 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
}
static void emit_migration_job_gen12(struct xe_sched_job *job,
- struct xe_lrc *lrc, u32 seqno)
+ struct xe_lrc *lrc, u32 *head,
+ u32 seqno)
{
u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
u32 dw[MAX_JOB_SIZE_DW], i = 0;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
i = emit_store_imm_ggtt(saddr, seqno, dw, i);
@@ -434,6 +444,7 @@ static void emit_job_gen12_gsc(struct xe_sched_job *job)
__emit_job_gen12_simple(job, job->q->lrc[0],
job->ptrs[0].batch_addr,
+ &job->ptrs[0].head,
xe_sched_job_lrc_seqno(job));
}
@@ -443,6 +454,7 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
if (xe_sched_job_is_migration(job->q)) {
emit_migration_job_gen12(job, job->q->lrc[0],
+ &job->ptrs[0].head,
xe_sched_job_lrc_seqno(job));
return;
}
@@ -450,6 +462,7 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_simple(job, job->q->lrc[i],
job->ptrs[i].batch_addr,
+ &job->ptrs[i].head,
xe_sched_job_lrc_seqno(job));
}
@@ -461,6 +474,7 @@ static void emit_job_gen12_video(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_video(job, job->q->lrc[i],
job->ptrs[i].batch_addr,
+ &job->ptrs[i].head,
xe_sched_job_lrc_seqno(job));
}
@@ -471,6 +485,7 @@ static void emit_job_gen12_render_compute(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_render_compute(job, job->q->lrc[i],
job->ptrs[i].batch_addr,
+ &job->ptrs[i].head,
xe_sched_job_lrc_seqno(job));
}
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index b5f430d59f80..ed509b1c8cfc 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -133,10 +133,7 @@ static bool rule_matches(const struct xe_device *xe,
match = hwe->class != r->engine_class;
break;
case XE_RTP_MATCH_FUNC:
- if (drm_WARN_ON(&xe->drm, !gt))
- return false;
-
- match = r->match_func(gt, hwe);
+ match = r->match_func(xe, gt, hwe);
break;
default:
drm_warn(&xe->drm, "Invalid RTP match %u\n",
@@ -343,13 +340,15 @@ void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process);
-bool xe_rtp_match_even_instance(const struct xe_gt *gt,
+bool xe_rtp_match_even_instance(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
return hwe->instance % 2 == 0;
}
-bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
+bool xe_rtp_match_first_render_or_compute(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
u64 render_compute_mask = gt->info.engine_mask &
@@ -359,20 +358,30 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
hwe->engine_id == __ffs(render_compute_mask);
}
-bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+bool xe_rtp_match_not_sriov_vf(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
- return !IS_SRIOV_VF(gt_to_xe(gt));
+ return !IS_SRIOV_VF(xe);
}
-bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt,
+bool xe_rtp_match_psmi_enabled(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
- return xe_configfs_get_psmi_enabled(to_pci_dev(gt_to_xe(gt)->drm.dev));
+ return xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev));
}
-bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt,
+bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
return xe_gt_has_discontiguous_dss_groups(gt);
}
+
+bool xe_rtp_match_has_flat_ccs(const struct xe_device *xe,
+ const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return xe->info.has_flat_ccs;
+}
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index ac12ddf6cde6..ba5f940c0a96 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -440,18 +440,21 @@ void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
/**
* xe_rtp_match_even_instance - Match if engine instance is even
+ * @xe: Device structure
* @gt: GT structure
* @hwe: Engine instance
*
* Returns: true if engine instance is even, false otherwise
*/
-bool xe_rtp_match_even_instance(const struct xe_gt *gt,
+bool xe_rtp_match_even_instance(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
/*
* xe_rtp_match_first_render_or_compute - Match if it's first render or compute
* engine in the GT
*
+ * @xe: Device structure
* @gt: GT structure
* @hwe: Engine instance
*
@@ -463,24 +466,41 @@ bool xe_rtp_match_even_instance(const struct xe_gt *gt,
* Returns: true if engine id is the first to match the render reset domain,
* false otherwise.
*/
-bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
+bool xe_rtp_match_first_render_or_compute(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
/*
* xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
*
+ * @xe: Device structure
* @gt: GT structure
* @hwe: Engine instance
*
* Returns: true if device is not VF, false otherwise.
*/
-bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+bool xe_rtp_match_not_sriov_vf(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
-bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt,
+bool xe_rtp_match_psmi_enabled(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
-bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt,
+bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
+/**
+ * xe_rtp_match_has_flat_ccs - Match when platform has FlatCCS compression
+ * @xe: Device structure
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Returns: true if platform has FlatCCS compression, false otherwise
+ */
+bool xe_rtp_match_has_flat_ccs(const struct xe_device *xe,
+ const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_rtp_types.h b/drivers/gpu/drm/xe/xe_rtp_types.h
index f4cf30e298cf..6ba7f226c227 100644
--- a/drivers/gpu/drm/xe/xe_rtp_types.h
+++ b/drivers/gpu/drm/xe/xe_rtp_types.h
@@ -10,6 +10,7 @@
#include "regs/xe_reg_defs.h"
+struct xe_device;
struct xe_hw_engine;
struct xe_gt;
@@ -86,7 +87,8 @@ struct xe_rtp_rule {
u8 engine_class;
};
/* MATCH_FUNC */
- bool (*match_func)(const struct xe_gt *gt,
+ bool (*match_func)(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
};
};
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index dbf260dded8d..13e7a12b03ad 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -24,6 +24,11 @@ struct xe_job_ptrs {
struct dma_fence_chain *chain_fence;
/** @batch_addr: Batch buffer address. */
u64 batch_addr;
+ /**
+	 * @head: The tail pointer of the LRC (i.e. the head pointer of the
+	 * job) at the time the job was submitted
+ */
+ u32 head;
};
/**
@@ -58,6 +63,10 @@ struct xe_sched_job {
bool ring_ops_flush_tlb;
/** @ggtt: mapped in ggtt. */
bool ggtt;
+ /** @skip_emit: skip emitting the job */
+ bool skip_emit;
+ /** @last_replay: last job being replayed */
+ bool last_replay;
/** @ptrs: per instance pointers. */
struct xe_job_ptrs ptrs[];
};
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index 7d2d6de2aabf..ea411944609b 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -167,6 +167,8 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t size)
*/
int xe_sriov_init_late(struct xe_device *xe)
{
+ if (IS_SRIOV_PF(xe))
+ return xe_sriov_pf_init_late(xe);
if (IS_SRIOV_VF(xe))
return xe_sriov_vf_init_late(xe);
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c
index 27ddf3cc80e9..bc1ab9ee31d9 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.c
@@ -8,6 +8,7 @@
#include <drm/drm_managed.h>
#include "xe_assert.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
@@ -19,6 +20,8 @@
static unsigned int wanted_max_vfs(struct xe_device *xe)
{
+ if (IS_ENABLED(CONFIG_CONFIGFS_FS))
+ return xe_configfs_get_max_vfs(to_pci_dev(xe->drm.dev));
return xe_modparam.max_vfs;
}
@@ -104,6 +107,31 @@ int xe_sriov_pf_init_early(struct xe_device *xe)
}
/**
+ * xe_sriov_pf_init_late() - Late initialization of the SR-IOV PF.
+ * @xe: the &xe_device to initialize
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_init_late(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_init(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* xe_sriov_pf_wait_ready() - Wait until PF is ready to operate.
* @xe: the &xe_device to test
*
@@ -146,45 +174,3 @@ void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
}
-
-static int simple_show(struct seq_file *m, void *data)
-{
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_info_node *node = m->private;
- struct dentry *parent = node->dent->d_parent;
- struct xe_device *xe = parent->d_inode->i_private;
- void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data;
-
- print(xe, &p);
- return 0;
-}
-
-static const struct drm_info_list debugfs_list[] = {
- { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary },
- { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions },
-};
-
-/**
- * xe_sriov_pf_debugfs_register - Register PF debugfs attributes.
- * @xe: the &xe_device
- * @root: the root &dentry
- *
- * Prepare debugfs attributes exposed by the PF.
- */
-void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
-{
- struct drm_minor *minor = xe->drm.primary;
- struct dentry *parent;
-
- /*
- * /sys/kernel/debug/dri/0/
- * ├── pf
- * │   ├── ...
- */
- parent = debugfs_create_dir("pf", root);
- if (IS_ERR(parent))
- return;
- parent->d_inode->i_private = xe;
-
- drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), parent, minor);
-}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h
index e3b34f8f5e04..cba3fde9581f 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.h
@@ -15,23 +15,13 @@ struct xe_device;
#ifdef CONFIG_PCI_IOV
bool xe_sriov_pf_readiness(struct xe_device *xe);
int xe_sriov_pf_init_early(struct xe_device *xe);
+int xe_sriov_pf_init_late(struct xe_device *xe);
int xe_sriov_pf_wait_ready(struct xe_device *xe);
-void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
#else
-static inline bool xe_sriov_pf_readiness(struct xe_device *xe)
-{
- return false;
-}
-
-static inline int xe_sriov_pf_init_early(struct xe_device *xe)
-{
- return 0;
-}
-
-static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
-{
-}
+static inline bool xe_sriov_pf_readiness(struct xe_device *xe) { return false; }
+static inline int xe_sriov_pf_init_early(struct xe_device *xe) { return 0; }
+static inline int xe_sriov_pf_init_late(struct xe_device *xe) { return 0; }
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_sriov_pf_control.c
new file mode 100644
index 000000000000..416d00a03fbb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_device.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_printk.h"
+
+/**
+ * xe_sriov_pf_control_pause_vf() - Pause a VF on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier (can't be 0 == PFID)
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_pause_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ if (result)
+ return result;
+
+ xe_sriov_info(xe, "VF%u paused!\n", vfid);
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_control_resume_vf() - Resume a VF on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_resume_vf(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ if (result)
+ return result;
+
+ xe_sriov_info(xe, "VF%u resumed!\n", vfid);
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_control_stop_vf() - Stop a VF on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_stop_vf(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ if (result)
+ return result;
+
+ xe_sriov_info(xe, "VF%u stopped!\n", vfid);
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_control_reset_vf() - Perform a VF reset (FLR).
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_trigger_flr(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_wait_flr(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ return result;
+}
+
+/**
+ * xe_sriov_pf_control_sync_flr() - Synchronize a VF FLR between all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, false);
+ if (ret < 0)
+ return ret;
+ }
+ for_each_gt(gt, xe, id) {
+ ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_sriov_pf_control.h
new file mode 100644
index 000000000000..2d52d0ac1b28
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_CONTROL_H_
+#define _XE_SRIOV_PF_CONTROL_H_
+
+struct xe_device;
+
+int xe_sriov_pf_control_pause_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c
new file mode 100644
index 000000000000..a81aa05c5532
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_pm.h"
+#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_debugfs.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_provision.h"
+#include "xe_sriov_pf_service.h"
+#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_pf_debugfs.h"
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── pf # d_inode->i_private = (xe_device*)
+ * │ ├── vf1 # d_inode->i_private = VFID(1)
+ * : :
+ * │ ├── vfN # d_inode->i_private = VFID(N)
+ */
+
+static void *extract_priv(struct dentry *d)
+{
+ return d->d_inode->i_private;
+}
+
+static struct xe_device *extract_xe(struct dentry *d)
+{
+ return extract_priv(d->d_parent);
+}
+
+static unsigned int extract_vfid(struct dentry *d)
+{
+ void *p = extract_priv(d);
+
+ return p == extract_xe(d) ? PFID : (uintptr_t)p;
+}
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── restore_auto_provisioning
+ * │ :
+ * │ ├── pf/
+ * │ ├── vf1
+ * │ │ ├── ...
+ */
+
+static ssize_t from_file_write_to_xe_call(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos,
+ int (*call)(struct xe_device *))
+{
+ struct dentry *dent = file_dentry(file);
+ struct xe_device *xe = extract_xe(dent);
+ bool yes;
+ int ret;
+
+ if (*ppos)
+ return -EINVAL;
+ ret = kstrtobool_from_user(userbuf, count, &yes);
+ if (ret < 0)
+ return ret;
+ if (yes) {
+ xe_pm_runtime_get(xe);
+ ret = call(xe);
+ xe_pm_runtime_put(xe);
+ }
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+#define DEFINE_SRIOV_ATTRIBUTE(OP) \
+static int OP##_show(struct seq_file *s, void *unused) \
+{ \
+ return 0; \
+} \
+static ssize_t OP##_write(struct file *file, const char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ return from_file_write_to_xe_call(file, userbuf, count, ppos, \
+ xe_sriov_pf_##OP); \
+} \
+DEFINE_SHOW_STORE_ATTRIBUTE(OP)
+
+static inline int xe_sriov_pf_restore_auto_provisioning(struct xe_device *xe)
+{
+ return xe_sriov_pf_provision_set_mode(xe, XE_SRIOV_PROVISIONING_MODE_AUTO);
+}
+
+DEFINE_SRIOV_ATTRIBUTE(restore_auto_provisioning);
+
+static void pf_populate_root(struct xe_device *xe, struct dentry *dent)
+{
+ debugfs_create_file("restore_auto_provisioning", 0200, dent, xe,
+ &restore_auto_provisioning_fops);
+}
+
+static int simple_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct xe_device *xe = parent->d_inode->i_private;
+ void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data;
+
+ print(xe, &p);
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary },
+ { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions },
+};
+
+static void pf_populate_pf(struct xe_device *xe, struct dentry *pfdent)
+{
+ struct drm_minor *minor = xe->drm.primary;
+
+ drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), pfdent, minor);
+}
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── vf1
+ * │ │ ├── pause
+ * │ │ ├── reset
+ * │ │ ├── resume
+ * │ │ ├── stop
+ * │ │ :
+ * │ ├── vf2
+ * │ │ ├── ...
+ */
+
+static ssize_t from_file_write_to_vf_call(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos,
+ int (*call)(struct xe_device *, unsigned int))
+{
+ struct dentry *dent = file_dentry(file)->d_parent;
+ struct xe_device *xe = extract_xe(dent);
+ unsigned int vfid = extract_vfid(dent);
+ bool yes;
+ int ret;
+
+ if (*ppos)
+ return -EINVAL;
+ ret = kstrtobool_from_user(userbuf, count, &yes);
+ if (ret < 0)
+ return ret;
+ if (yes) {
+ xe_pm_runtime_get(xe);
+ ret = call(xe, vfid);
+ xe_pm_runtime_put(xe);
+ }
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+#define DEFINE_VF_CONTROL_ATTRIBUTE(OP) \
+static int OP##_show(struct seq_file *s, void *unused) \
+{ \
+ return 0; \
+} \
+static ssize_t OP##_write(struct file *file, const char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ return from_file_write_to_vf_call(file, userbuf, count, ppos, \
+ xe_sriov_pf_control_##OP); \
+} \
+DEFINE_SHOW_STORE_ATTRIBUTE(OP)
+
+DEFINE_VF_CONTROL_ATTRIBUTE(pause_vf);
+DEFINE_VF_CONTROL_ATTRIBUTE(resume_vf);
+DEFINE_VF_CONTROL_ATTRIBUTE(stop_vf);
+DEFINE_VF_CONTROL_ATTRIBUTE(reset_vf);
+
+static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent)
+{
+ debugfs_create_file("pause", 0200, vfdent, xe, &pause_vf_fops);
+ debugfs_create_file("resume", 0200, vfdent, xe, &resume_vf_fops);
+ debugfs_create_file("stop", 0200, vfdent, xe, &stop_vf_fops);
+ debugfs_create_file("reset", 0200, vfdent, xe, &reset_vf_fops);
+}
+
+static void pf_populate_with_tiles(struct xe_device *xe, struct dentry *dent, unsigned int vfid)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ for_each_tile(tile, xe, id)
+ xe_tile_sriov_pf_debugfs_populate(tile, dent, vfid);
+}
+
+/**
+ * xe_sriov_pf_debugfs_register - Register PF debugfs attributes.
+ * @xe: the &xe_device
+ * @root: the root &dentry
+ *
+ * Create a separate directory that will contain all SR-IOV related files,
+ * organized per SR-IOV function (PF, VF1, VF2, ..., VFn).
+ */
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+ int totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ struct dentry *pfdent;
+ struct dentry *vfdent;
+ struct dentry *dent;
+ char vfname[16]; /* should be more than enough for "vf%u\0" and VFID(UINT_MAX) */
+ unsigned int n;
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── ...
+ */
+ dent = debugfs_create_dir("sriov", root);
+ if (IS_ERR(dent))
+ return;
+ dent->d_inode->i_private = xe;
+
+ pf_populate_root(xe, dent);
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── pf # d_inode->i_private = (xe_device*)
+ * │ │ ├── ...
+ */
+ pfdent = debugfs_create_dir("pf", dent);
+ if (IS_ERR(pfdent))
+ return;
+ pfdent->d_inode->i_private = xe;
+
+ pf_populate_pf(xe, pfdent);
+ pf_populate_with_tiles(xe, pfdent, PFID);
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── vf1 # d_inode->i_private = VFID(1)
+ * │ ├── vf2 # d_inode->i_private = VFID(2)
+ * │ ├── ...
+ */
+ for (n = 1; n <= totalvfs; n++) {
+ snprintf(vfname, sizeof(vfname), "vf%u", VFID(n));
+ vfdent = debugfs_create_dir(vfname, dent);
+ if (IS_ERR(vfdent))
+ return;
+ vfdent->d_inode->i_private = (void *)(uintptr_t)VFID(n);
+
+ pf_populate_vf(xe, vfdent);
+ pf_populate_with_tiles(xe, vfdent, VFID(n));
+ }
+}
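
For reference (not part of the patch), DEFINE_VF_CONTROL_ATTRIBUTE(pause_vf) above expands roughly as below; DEFINE_SHOW_STORE_ATTRIBUTE() then supplies the pause_vf_fops that pf_populate_vf() wires to the "pause" file, so writing a truthy value to .../sriov/vf1/pause ends up calling xe_sriov_pf_control_pause_vf(xe, 1) with a runtime PM reference held.

	static int pause_vf_show(struct seq_file *s, void *unused)
	{
		return 0;
	}

	static ssize_t pause_vf_write(struct file *file, const char __user *userbuf,
				      size_t count, loff_t *ppos)
	{
		/* The VF id is recovered from the parent dentry's i_private. */
		return from_file_write_to_vf_call(file, userbuf, count, ppos,
						  xe_sriov_pf_control_pause_vf);
	}

	DEFINE_SHOW_STORE_ATTRIBUTE(pause_vf);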
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h
new file mode 100644
index 000000000000..93db13585b82
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_DEBUGFS_H_
+#define _XE_SRIOV_PF_DEBUGFS_H_
+
+struct dentry;
+struct xe_device;
+
+#ifdef CONFIG_PCI_IOV
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
+#else
+static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root) { }
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
index dd1df950b021..4a4340fb633a 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
@@ -37,6 +37,17 @@ static inline int xe_sriov_pf_get_totalvfs(struct xe_device *xe)
return xe->sriov.pf.driver_max_vfs;
}
+/**
+ * xe_sriov_pf_num_vfs() - Number of enabled VFs on the PF.
+ * @xe: the PF &xe_device
+ *
+ * Return: Number of enabled VFs on the PF.
+ */
+static inline unsigned int xe_sriov_pf_num_vfs(const struct xe_device *xe)
+{
+ return pci_num_vf(to_pci_dev(xe->drm.dev));
+}
+
static inline struct mutex *xe_sriov_pf_master_mutex(struct xe_device *xe)
{
xe_assert(xe, IS_SRIOV_PF(xe));
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_provision.c b/drivers/gpu/drm/xe/xe_sriov_pf_provision.c
new file mode 100644
index 000000000000..663fb0c045e9
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_provision.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt_sriov_pf_config.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_provision.h"
+#include "xe_sriov_pf_provision_types.h"
+#include "xe_sriov_printk.h"
+
+static const char *mode_to_string(enum xe_sriov_provisioning_mode mode)
+{
+ switch (mode) {
+ case XE_SRIOV_PROVISIONING_MODE_AUTO:
+ return "auto";
+ case XE_SRIOV_PROVISIONING_MODE_CUSTOM:
+ return "custom";
+ default:
+ return "<invalid>";
+ }
+}
+
+static bool pf_auto_provisioning_mode(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ return xe->sriov.pf.provision.mode == XE_SRIOV_PROVISIONING_MODE_AUTO;
+}
+
+static bool pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
+{
+ unsigned int n;
+
+ for (n = 1; n <= num_vfs; n++)
+ if (!xe_gt_sriov_pf_config_is_empty(gt, n))
+ return false;
+
+ return true;
+}
+
+static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ if (!pf_needs_provisioning(gt, num_vfs))
+ return -EUCLEAN;
+ err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
+ result = result ?: err;
+ }
+
+ return result;
+}
+
+static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ unsigned int n;
+
+ for_each_gt(gt, xe, id)
+ for (n = 1; n <= num_vfs; n++)
+ xe_gt_sriov_pf_config_release(gt, n, true);
+}
+
+static void pf_unprovision_all_vfs(struct xe_device *xe)
+{
+ pf_unprovision_vfs(xe, xe_sriov_pf_get_totalvfs(xe));
+}
+
+/**
+ * xe_sriov_pf_provision_vfs() - Provision VFs in auto-mode.
+ * @xe: the PF &xe_device
+ * @num_vfs: the number of VFs to auto-provision
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ if (!pf_auto_provisioning_mode(xe))
+ return 0;
+
+ return pf_provision_vfs(xe, num_vfs);
+}
+
+/**
+ * xe_sriov_pf_unprovision_vfs() - Unprovision VFs in auto-mode.
+ * @xe: the PF &xe_device
+ * @num_vfs: the number of VFs to unprovision
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ if (!pf_auto_provisioning_mode(xe))
+ return 0;
+
+ pf_unprovision_vfs(xe, num_vfs);
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_provision_set_mode() - Change VFs provision mode.
+ * @xe: the PF &xe_device
+ * @mode: the new VFs provisioning mode
+ *
+ * When changing from AUTO to CUSTOM mode, any already allocated VF resources
+ * will remain allocated and will not be released when VFs are disabled.
+ *
+ * When changing back to AUTO mode, if VFs are not enabled, already allocated
+ * VF resources will be released immediately. If VFs are still enabled, such a
+ * mode change is rejected.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_set_mode(struct xe_device *xe, enum xe_sriov_provisioning_mode mode)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ if (mode == xe->sriov.pf.provision.mode)
+ return 0;
+
+ if (mode == XE_SRIOV_PROVISIONING_MODE_AUTO) {
+ if (xe_sriov_pf_num_vfs(xe)) {
+ xe_sriov_dbg(xe, "can't restore %s: VFs must be disabled!\n",
+ mode_to_string(mode));
+ return -EBUSY;
+ }
+ pf_unprovision_all_vfs(xe);
+ }
+
+ xe_sriov_dbg(xe, "mode %s changed to %s by %ps\n",
+ mode_to_string(xe->sriov.pf.provision.mode),
+ mode_to_string(mode), __builtin_return_address(0));
+ xe->sriov.pf.provision.mode = mode;
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_provision.h b/drivers/gpu/drm/xe/xe_sriov_pf_provision.h
new file mode 100644
index 000000000000..cf3657a32e90
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_provision.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_PROVISION_H_
+#define _XE_SRIOV_PF_PROVISION_H_
+
+#include "xe_sriov_pf_provision_types.h"
+
+struct xe_device;
+
+int xe_sriov_pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs);
+int xe_sriov_pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs);
+
+int xe_sriov_pf_provision_set_mode(struct xe_device *xe, enum xe_sriov_provisioning_mode mode);
+
+/**
+ * xe_sriov_pf_provision_set_custom_mode() - Change VFs provision mode to custom.
+ * @xe: the PF &xe_device
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static inline int xe_sriov_pf_provision_set_custom_mode(struct xe_device *xe)
+{
+ return xe_sriov_pf_provision_set_mode(xe, XE_SRIOV_PROVISIONING_MODE_CUSTOM);
+}
+
+#endif
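
Not part of the patch: a hypothetical sketch of how an explicit provisioning path (the actual callers live outside these hunks) would be expected to use the helper above, switching the PF to custom mode before applying a manual configuration so that auto-(un)provisioning no longer touches those resources. pf_apply_manual_vf_config() is assumed, not a real function.

	/* Hypothetical sketch under the assumptions stated above. */
	static int pf_set_manual_vf_config(struct xe_device *xe, unsigned int vfid)
	{
		int err;

		/* Stop auto mode from releasing manually configured resources. */
		err = xe_sriov_pf_provision_set_custom_mode(xe);
		if (err)
			return err;

		return pf_apply_manual_vf_config(xe, vfid);
	}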
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h
new file mode 100644
index 000000000000..a847b8a4c4da
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_PROVISION_TYPES_H_
+#define _XE_SRIOV_PF_PROVISION_TYPES_H_
+
+#include <linux/build_bug.h>
+
+/**
+ * enum xe_sriov_provisioning_mode - SR-IOV provisioning mode.
+ *
+ * @XE_SRIOV_PROVISIONING_MODE_AUTO: VFs are provisioned while being enabled.
+ *                                   Any resources allocated to the VFs will be
+ *                                   automatically released when the VFs are disabled.
+ *                                   This is the default mode.
+ * @XE_SRIOV_PROVISIONING_MODE_CUSTOM: Explicit VF provisioning using uABI interfaces.
+ *                                     VF resources remain allocated regardless of
+ *                                     whether VFs are enabled or not.
+ */
+enum xe_sriov_provisioning_mode {
+ XE_SRIOV_PROVISIONING_MODE_AUTO,
+ XE_SRIOV_PROVISIONING_MODE_CUSTOM,
+};
+static_assert(XE_SRIOV_PROVISIONING_MODE_AUTO == 0);
+
+/**
+ * struct xe_sriov_pf_provision - Data used by the PF provisioning.
+ */
+struct xe_sriov_pf_provision {
+ /** @mode: selected provisioning mode. */
+ enum xe_sriov_provisioning_mode mode;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
index 956a88f9f213..c753cd59aed2 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
@@ -9,6 +9,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include "xe_sriov_pf_provision_types.h"
#include "xe_sriov_pf_service_types.h"
/**
@@ -35,6 +36,9 @@ struct xe_device_pf {
/** @master_lock: protects all VFs configurations across GTs */
struct mutex master_lock;
+ /** @provision: device level provisioning data. */
+ struct xe_sriov_pf_provision provision;
+
/** @service: device level service data. */
struct xe_sriov_pf_service service;
diff --git a/drivers/gpu/drm/xe/xe_sriov_printk.h b/drivers/gpu/drm/xe/xe_sriov_printk.h
index 117e1d541692..4c6b5c3d2190 100644
--- a/drivers/gpu/drm/xe/xe_sriov_printk.h
+++ b/drivers/gpu/drm/xe/xe_sriov_printk.h
@@ -1,22 +1,22 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2023 Intel Corporation
+ * Copyright © 2023-2025 Intel Corporation
*/
#ifndef _XE_SRIOV_PRINTK_H_
#define _XE_SRIOV_PRINTK_H_
-#include <drm/drm_print.h>
-
-#include "xe_device_types.h"
-#include "xe_sriov_types.h"
+#include "xe_printk.h"
#define xe_sriov_printk_prefix(xe) \
((xe)->sriov.__mode == XE_SRIOV_MODE_PF ? "PF: " : \
(xe)->sriov.__mode == XE_SRIOV_MODE_VF ? "VF: " : "")
+#define __XE_SRIOV_PRINTK_FMT(_xe, _fmt, _args...) \
+ "%s" _fmt, xe_sriov_printk_prefix(_xe), ##_args
+
#define xe_sriov_printk(xe, _level, fmt, ...) \
- drm_##_level(&(xe)->drm, "%s" fmt, xe_sriov_printk_prefix(xe), ##__VA_ARGS__)
+ xe_##_level((xe), __XE_SRIOV_PRINTK_FMT((xe), fmt, ##__VA_ARGS__))
#define xe_sriov_err(xe, fmt, ...) \
xe_sriov_printk((xe), err, fmt, ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index cdd9f8e78b2a..911d5720917b 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -6,22 +6,12 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
-#include "xe_assert.h"
-#include "xe_device.h"
#include "xe_gt.h"
-#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
-#include "xe_guc_ct.h"
-#include "xe_guc_submit.h"
-#include "xe_irq.h"
-#include "xe_lrc.h"
-#include "xe_pm.h"
-#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
#include "xe_sriov_vf_ccs.h"
-#include "xe_tile_sriov_vf.h"
/**
* DOC: VF restore procedure in PF KMD and VF KMD
@@ -159,8 +149,6 @@ static void vf_disable_migration(struct xe_device *xe, const char *fmt, ...)
xe->sriov.vf.migration.enabled = false;
}
-static void migration_worker_func(struct work_struct *w);
-
static void vf_migration_init_early(struct xe_device *xe)
{
/*
@@ -185,8 +173,6 @@ static void vf_migration_init_early(struct xe_device *xe)
guc_version.major, guc_version.minor);
}
- INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
-
xe->sriov.vf.migration.enabled = true;
xe_sriov_dbg(xe, "migration support enabled\n");
}
@@ -201,235 +187,6 @@ void xe_sriov_vf_init_early(struct xe_device *xe)
}
/**
- * vf_post_migration_shutdown - Stop the driver activities after VF migration.
- * @xe: the &xe_device struct instance
- *
- * After this VM is migrated and assigned to a new VF, it is running on a new
- * hardware, and therefore many hardware-dependent states and related structures
- * require fixups. Without fixups, the hardware cannot do any work, and therefore
- * all GPU pipelines are stalled.
- * Stop some of kernel activities to make the fixup process faster.
- */
-static void vf_post_migration_shutdown(struct xe_device *xe)
-{
- struct xe_gt *gt;
- unsigned int id;
- int ret = 0;
-
- for_each_gt(gt, xe, id) {
- xe_guc_submit_pause(&gt->uc.guc);
- ret |= xe_guc_submit_reset_block(&gt->uc.guc);
- }
-
- if (ret)
- drm_info(&xe->drm, "migration recovery encountered ongoing reset\n");
-}
-
-/**
- * vf_post_migration_kickstart - Re-start the driver activities under new hardware.
- * @xe: the &xe_device struct instance
- *
- * After we have finished with all post-migration fixups, restart the driver
- * activities to continue feeding the GPU with workloads.
- */
-static void vf_post_migration_kickstart(struct xe_device *xe)
-{
- struct xe_gt *gt;
- unsigned int id;
-
- /*
- * Make sure interrupts on the new HW are properly set. The GuC IRQ
- * must be working at this point, since the recovery did started,
- * but the rest was not enabled using the procedure from spec.
- */
- xe_irq_resume(xe);
-
- for_each_gt(gt, xe, id) {
- xe_guc_submit_reset_unblock(&gt->uc.guc);
- xe_guc_submit_unpause(&gt->uc.guc);
- }
-}
-
-static bool gt_vf_post_migration_needed(struct xe_gt *gt)
-{
- return test_bit(gt->info.id, &gt_to_xe(gt)->sriov.vf.migration.gt_flags);
-}
-
-/*
- * Notify GuCs marked in flags about resource fixups apply finished.
- * @xe: the &xe_device struct instance
- * @gt_flags: flags marking to which GTs the notification shall be sent
- */
-static int vf_post_migration_notify_resfix_done(struct xe_device *xe, unsigned long gt_flags)
-{
- struct xe_gt *gt;
- unsigned int id;
- int err = 0;
-
- for_each_gt(gt, xe, id) {
- if (!test_bit(id, &gt_flags))
- continue;
- /* skip asking GuC for RESFIX exit if new recovery request arrived */
- if (gt_vf_post_migration_needed(gt))
- continue;
- err = xe_gt_sriov_vf_notify_resfix_done(gt);
- if (err)
- break;
- clear_bit(id, &gt_flags);
- }
-
- if (gt_flags && !err)
- drm_dbg(&xe->drm, "another recovery imminent, skipped some notifications\n");
- return err;
-}
-
-static int vf_get_next_migrated_gt_id(struct xe_device *xe)
-{
- struct xe_gt *gt;
- unsigned int id;
-
- for_each_gt(gt, xe, id) {
- if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags))
- return id;
- }
- return -1;
-}
-
-static size_t post_migration_scratch_size(struct xe_device *xe)
-{
- return max(xe_lrc_reg_size(xe), LRC_WA_BB_SIZE);
-}
-
-/**
- * Perform post-migration fixups on a single GT.
- *
- * After migration, GuC needs to be re-queried for VF configuration to check
- * if it matches previous provisioning. Most of VF provisioning shall be the
- * same, except GGTT range, since GGTT is not virtualized per-VF. If GGTT
- * range has changed, we have to perform fixups - shift all GGTT references
- * used anywhere within the driver. After the fixups in this function succeed,
- * it is allowed to ask the GuC bound to this GT to continue normal operation.
- *
- * Returns: 0 if the operation completed successfully, or a negative error
- * code otherwise.
- */
-static int gt_vf_post_migration_fixups(struct xe_gt *gt)
-{
- s64 shift;
- void *buf;
- int err;
-
- buf = kmalloc(post_migration_scratch_size(gt_to_xe(gt)), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- err = xe_gt_sriov_vf_query_config(gt);
- if (err)
- goto out;
-
- shift = xe_gt_sriov_vf_ggtt_shift(gt);
- if (shift) {
- xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
- xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
- err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
- if (err)
- goto out;
- xe_guc_jobs_ring_rebase(&gt->uc.guc);
- xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
- }
-
-out:
- kfree(buf);
- return err;
-}
-
-static void vf_post_migration_recovery(struct xe_device *xe)
-{
- unsigned long fixed_gts = 0;
- int id, err;
-
- drm_dbg(&xe->drm, "migration recovery in progress\n");
- xe_pm_runtime_get(xe);
- vf_post_migration_shutdown(xe);
-
- if (!xe_sriov_vf_migration_supported(xe)) {
- xe_sriov_err(xe, "migration is not supported\n");
- err = -ENOTRECOVERABLE;
- goto fail;
- }
-
- while (id = vf_get_next_migrated_gt_id(xe), id >= 0) {
- struct xe_gt *gt = xe_device_get_gt(xe, id);
-
- err = gt_vf_post_migration_fixups(gt);
- if (err)
- goto fail;
-
- set_bit(id, &fixed_gts);
- }
-
- vf_post_migration_kickstart(xe);
- err = vf_post_migration_notify_resfix_done(xe, fixed_gts);
- if (err)
- goto fail;
-
- xe_pm_runtime_put(xe);
- drm_notice(&xe->drm, "migration recovery ended\n");
- return;
-fail:
- xe_pm_runtime_put(xe);
- drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
- xe_device_declare_wedged(xe);
-}
-
-static void migration_worker_func(struct work_struct *w)
-{
- struct xe_device *xe = container_of(w, struct xe_device,
- sriov.vf.migration.worker);
-
- vf_post_migration_recovery(xe);
-}
-
-/*
- * Check if post-restore recovery is coming on any of GTs.
- * @xe: the &xe_device struct instance
- *
- * Return: True if migration recovery worker will soon be running. Any worker currently
- * executing does not affect the result.
- */
-static bool vf_ready_to_recovery_on_any_gts(struct xe_device *xe)
-{
- struct xe_gt *gt;
- unsigned int id;
-
- for_each_gt(gt, xe, id) {
- if (test_bit(id, &xe->sriov.vf.migration.gt_flags))
- return true;
- }
- return false;
-}
-
-/**
- * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
- * @xe: the &xe_device to start recovery on
- *
- * This function shall be called only by VF.
- */
-void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
-{
- bool started;
-
- xe_assert(xe, IS_SRIOV_VF(xe));
-
- if (!vf_ready_to_recovery_on_any_gts(xe))
- return;
-
- started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
- drm_info(&xe->drm, "VF migration recovery %s\n", started ?
- "scheduled" : "already in progress");
-}
-
-/**
* xe_sriov_vf_init_late() - SR-IOV VF late initialization functions.
* @xe: the &xe_device to initialize
*
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
index 9e752105ec2a..4df95266b261 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -13,7 +13,6 @@ struct xe_device;
void xe_sriov_vf_init_early(struct xe_device *xe);
int xe_sriov_vf_init_late(struct xe_device *xe);
-void xe_sriov_vf_start_migration_recovery(struct xe_device *xe);
bool xe_sriov_vf_migration_supported(struct xe_device *xe);
void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root);
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
index 8dec616c37c9..790249801364 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -175,6 +175,15 @@ static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx)
struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
u32 dw[10], i = 0;
+ /*
+ * XXX: Save/restore fixes — for some reason, the GuC only accepts the
+ * save/restore context if the LRC head pointer is zero. This is evident
+ * from repeated VF migrations failing when the LRC head pointer is
+ * non-zero.
+ */
+ lrc->ring.tail = 0;
+ xe_lrc_set_ring_head(lrc, 0);
+
dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
dw[i++] = MI_BATCH_BUFFER_START | XE_INSTR_NUM_DW(3);
dw[i++] = lower_32_bits(addr);
@@ -186,6 +195,25 @@ static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx)
xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
}
+/**
+ * xe_sriov_vf_ccs_rebase - Rebase GGTT addresses for CCS save / restore
+ * @xe: the &xe_device.
+ */
+void xe_sriov_vf_ccs_rebase(struct xe_device *xe)
+{
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+ if (!IS_VF_CCS_READY(xe))
+ return;
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ struct xe_sriov_vf_ccs_ctx *ctx =
+ &xe->sriov.vf.ccs.contexts[ctx_id];
+
+ ccs_rw_update_ring(ctx);
+ }
+}
+
static int register_save_restore_context(struct xe_sriov_vf_ccs_ctx *ctx)
{
int ctx_type;
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
index 0745c0ff0228..f8ca6efce9ee 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
@@ -18,6 +18,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe);
int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo);
int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo);
int xe_sriov_vf_ccs_register_context(struct xe_device *xe);
+void xe_sriov_vf_ccs_rebase(struct xe_device *xe);
void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p);
static inline bool xe_sriov_vf_ccs_ready(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
index 426cc5841958..6a0fd0f5463e 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
@@ -33,10 +33,6 @@ struct xe_device_vf {
/** @migration: VF Migration state data */
struct {
- /** @migration.worker: VF migration recovery worker */
- struct work_struct worker;
- /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
- unsigned long gt_flags;
/**
* @migration.enabled: flag indicating if migration support
* was enabled or not due to missing prerequisites
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index d49ba3401963..4f4f9a5c43af 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -19,9 +19,9 @@
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
-#include "xe_wa.h"
#include "xe_vram.h"
#include "xe_vram_types.h"
+#include "xe_wa.h"
/**
* DOC: Multi-tile Design
@@ -124,6 +124,14 @@ int xe_tile_alloc_vram(struct xe_tile *tile)
return -ENOMEM;
tile->mem.vram = vram;
+ /*
+	 * If kernel_vram is not already allocated, the
+	 * tile has a common VRAM region for kernel and
+	 * user space.
+ */
+ if (!tile->mem.kernel_vram)
+ tile->mem.kernel_vram = tile->mem.vram;
+
return 0;
}
@@ -149,10 +157,6 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
if (err)
return err;
- tile->primary_gt = xe_gt_alloc(tile);
- if (IS_ERR(tile->primary_gt))
- return PTR_ERR(tile->primary_gt);
-
xe_pcode_init(tile);
return 0;
diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.c b/drivers/gpu/drm/xe/xe_tile_debugfs.c
index 5523874cba7b..fff242a5ae56 100644
--- a/drivers/gpu/drm/xe/xe_tile_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_debugfs.c
@@ -6,6 +6,7 @@
#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
+#include "xe_ggtt.h"
#include "xe_pm.h"
#include "xe_sa.h"
#include "xe_tile_debugfs.h"
@@ -16,7 +17,7 @@ static struct xe_tile *node_to_tile(struct drm_info_node *node)
}
/**
- * tile_debugfs_simple_show - A show callback for struct drm_info_list
+ * xe_tile_debugfs_simple_show() - A show callback for struct drm_info_list
* @m: the &seq_file
* @data: data used by the drm debugfs helpers
*
@@ -57,7 +58,7 @@ static struct xe_tile *node_to_tile(struct drm_info_node *node)
*
* Return: 0 on success or a negative error code on failure.
*/
-static int tile_debugfs_simple_show(struct seq_file *m, void *data)
+int xe_tile_debugfs_simple_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
struct drm_info_node *node = m->private;
@@ -68,7 +69,7 @@ static int tile_debugfs_simple_show(struct seq_file *m, void *data)
}
/**
- * tile_debugfs_show_with_rpm - A show callback for struct drm_info_list
+ * xe_tile_debugfs_show_with_rpm() - A show callback for struct drm_info_list
* @m: the &seq_file
* @data: data used by the drm debugfs helpers
*
@@ -76,7 +77,7 @@ static int tile_debugfs_simple_show(struct seq_file *m, void *data)
*
* Return: 0 on success or a negative error code on failure.
*/
-static int tile_debugfs_show_with_rpm(struct seq_file *m, void *data)
+int xe_tile_debugfs_show_with_rpm(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct xe_tile *tile = node_to_tile(node);
@@ -84,12 +85,17 @@ static int tile_debugfs_show_with_rpm(struct seq_file *m, void *data)
int ret;
xe_pm_runtime_get(xe);
- ret = tile_debugfs_simple_show(m, data);
+ ret = xe_tile_debugfs_simple_show(m, data);
xe_pm_runtime_put(xe);
return ret;
}
+static int ggtt(struct xe_tile *tile, struct drm_printer *p)
+{
+ return xe_ggtt_dump(tile->mem.ggtt, p);
+}
+
static int sa_info(struct xe_tile *tile, struct drm_printer *p)
{
drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
@@ -100,7 +106,8 @@ static int sa_info(struct xe_tile *tile, struct drm_printer *p)
/* only for debugfs files which can be safely used on the VF */
static const struct drm_info_list vf_safe_debugfs_list[] = {
- { "sa_info", .show = tile_debugfs_show_with_rpm, .data = sa_info },
+ { "ggtt", .show = xe_tile_debugfs_show_with_rpm, .data = ggtt },
+ { "sa_info", .show = xe_tile_debugfs_show_with_rpm, .data = sa_info },
};
/**
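For reference, a minimal sketch of the dispatch pattern behind the show helpers exported above — assuming, as the table entries suggest, that each drm_info_list .data slot carries a tile-level print callback; the wrapper name is illustrative only:

/* illustrative sketch, not part of the patch */
static int example_tile_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct xe_tile *tile = node_to_tile(node);
	struct drm_printer p = drm_seq_file_printer(m);
	int (*print)(struct xe_tile *tile, struct drm_printer *p) = node->info_ent->data;

	/* for the new "ggtt" entry this lands in xe_ggtt_dump() */
	return print(tile, &p);
}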
diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.h b/drivers/gpu/drm/xe/xe_tile_debugfs.h
index 0e5f724de37f..4429c22542f4 100644
--- a/drivers/gpu/drm/xe/xe_tile_debugfs.h
+++ b/drivers/gpu/drm/xe/xe_tile_debugfs.h
@@ -6,8 +6,11 @@
#ifndef _XE_TILE_DEBUGFS_H_
#define _XE_TILE_DEBUGFS_H_
+struct seq_file;
struct xe_tile;
void xe_tile_debugfs_register(struct xe_tile *tile);
+int xe_tile_debugfs_simple_show(struct seq_file *m, void *data);
+int xe_tile_debugfs_show_with_rpm(struct seq_file *m, void *data);
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c
new file mode 100644
index 000000000000..f3f478f14ff5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_debugfs.h"
+#include "xe_pm.h"
+#include "xe_tile_debugfs.h"
+#include "xe_tile_sriov_pf_debugfs.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_provision.h"
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── pf # d_inode->i_private = (xe_device*)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * │ │ : :
+ * │ ├── vf1 # d_inode->i_private = VFID(1)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * │ │ : :
+ * │ ├── vfN # d_inode->i_private = VFID(N)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * : : : :
+ */
+
+static void *extract_priv(struct dentry *d)
+{
+ return d->d_inode->i_private;
+}
+
+__maybe_unused
+static struct xe_tile *extract_tile(struct dentry *d)
+{
+ return extract_priv(d);
+}
+
+static struct xe_device *extract_xe(struct dentry *d)
+{
+ return extract_priv(d->d_parent->d_parent);
+}
+
+__maybe_unused
+static unsigned int extract_vfid(struct dentry *d)
+{
+ void *pp = extract_priv(d->d_parent);
+
+ return pp == extract_xe(d) ? PFID : (uintptr_t)pp;
+}
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * : ├── tile0
+ * : ├── ggtt_available
+ * ├── ggtt_provisioned
+ */
+
+static int pf_config_print_available_ggtt(struct xe_tile *tile, struct drm_printer *p)
+{
+ return xe_gt_sriov_pf_config_print_available_ggtt(tile->primary_gt, p);
+}
+
+static int pf_config_print_ggtt(struct xe_tile *tile, struct drm_printer *p)
+{
+ return xe_gt_sriov_pf_config_print_ggtt(tile->primary_gt, p);
+}
+
+static const struct drm_info_list pf_ggtt_info[] = {
+ {
+ "ggtt_available",
+ .show = xe_tile_debugfs_simple_show,
+ .data = pf_config_print_available_ggtt,
+ },
+ {
+ "ggtt_provisioned",
+ .show = xe_tile_debugfs_simple_show,
+ .data = pf_config_print_ggtt,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * : ├── tile0
+ * : ├── vram_provisioned
+ */
+
+static int pf_config_print_vram(struct xe_tile *tile, struct drm_printer *p)
+{
+ return xe_gt_sriov_pf_config_print_lmem(tile->primary_gt, p);
+}
+
+static const struct drm_info_list pf_vram_info[] = {
+ {
+ "vram_provisioned",
+ .show = xe_tile_debugfs_simple_show,
+ .data = pf_config_print_vram,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── pf
+ * │ │ ├── tile0
+ * │ │ │ ├── ggtt_spare
+ * │ │ │ ├── vram_spare
+ * │ │ ├── tile1
+ * │ │ : :
+ * │ ├── vf1
+ * │ : ├── tile0
+ * │ │ ├── ggtt_quota
+ * │ │ ├── vram_quota
+ * │ ├── tile1
+ * │ : :
+ */
+
+#define DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(NAME, CONFIG, TYPE, FORMAT) \
+ \
+static int NAME##_set(void *data, u64 val) \
+{ \
+ struct xe_tile *tile = extract_tile(data); \
+ unsigned int vfid = extract_vfid(data); \
+ struct xe_gt *gt = tile->primary_gt; \
+ struct xe_device *xe = tile->xe; \
+ int err; \
+ \
+ if (val > (TYPE)~0ull) \
+ return -EOVERFLOW; \
+ \
+ xe_pm_runtime_get(xe); \
+ err = xe_sriov_pf_wait_ready(xe) ?: \
+ xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
+ if (!err) \
+ xe_sriov_pf_provision_set_custom_mode(xe); \
+ xe_pm_runtime_put(xe); \
+ \
+ return err; \
+} \
+ \
+static int NAME##_get(void *data, u64 *val) \
+{ \
+ struct xe_tile *tile = extract_tile(data); \
+ unsigned int vfid = extract_vfid(data); \
+ struct xe_gt *gt = tile->primary_gt; \
+ \
+ *val = xe_gt_sriov_pf_config_get_##CONFIG(gt, vfid); \
+ return 0; \
+} \
+ \
+DEFINE_DEBUGFS_ATTRIBUTE(NAME##_fops, NAME##_get, NAME##_set, FORMAT)
+
+DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(ggtt, ggtt, u64, "%llu\n");
+DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(vram, lmem, u64, "%llu\n");
+
+static void pf_add_config_attrs(struct xe_tile *tile, struct dentry *dent, unsigned int vfid)
+{
+ struct xe_device *xe = tile->xe;
+
+ xe_tile_assert(tile, tile == extract_tile(dent));
+ xe_tile_assert(tile, vfid == extract_vfid(dent));
+
+ debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare",
+ 0644, dent, dent, &ggtt_fops);
+ if (IS_DGFX(xe))
+ debugfs_create_file_unsafe(vfid ? "vram_quota" : "vram_spare",
+ xe_device_has_lmtt(xe) ? 0644 : 0444,
+ dent, dent, &vram_fops);
+}
+
+static void pf_populate_tile(struct xe_tile *tile, struct dentry *dent, unsigned int vfid)
+{
+ struct xe_device *xe = tile->xe;
+ struct drm_minor *minor = xe->drm.primary;
+ struct xe_gt *gt;
+ unsigned int id;
+
+ pf_add_config_attrs(tile, dent, vfid);
+
+ if (!vfid) {
+ drm_debugfs_create_files(pf_ggtt_info,
+ ARRAY_SIZE(pf_ggtt_info),
+ dent, minor);
+ if (IS_DGFX(xe))
+ drm_debugfs_create_files(pf_vram_info,
+ ARRAY_SIZE(pf_vram_info),
+ dent, minor);
+ }
+
+ for_each_gt_on_tile(gt, tile, id)
+ xe_gt_sriov_pf_debugfs_populate(gt, dent, vfid);
+}
+
+/**
+ * xe_tile_sriov_pf_debugfs_populate() - Populate SR-IOV debugfs tree with tile files.
+ * @tile: the &xe_tile to register
+ * @parent: the parent &dentry that represents the SR-IOV @vfid function
+ * @vfid: the VF identifier
+ *
+ * Add a new debugfs directory to the @parent directory to represent a @tile,
+ * and populate it with files related to the SR-IOV @vfid function.
+ *
+ * This function can only be called on PF.
+ */
+void xe_tile_sriov_pf_debugfs_populate(struct xe_tile *tile, struct dentry *parent,
+ unsigned int vfid)
+{
+ struct xe_device *xe = tile->xe;
+ struct dentry *dent;
+ char name[10]; /* should be enough up to "tile%u\0" for 2^16 - 1 */
+
+ xe_tile_assert(tile, IS_SRIOV_PF(xe));
+ xe_tile_assert(tile, extract_priv(parent->d_parent) == xe);
+ xe_tile_assert(tile, extract_priv(parent) == tile->xe ||
+ (uintptr_t)extract_priv(parent) == vfid);
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── pf # parent, d_inode->i_private = (xe_device*)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * │ │ : :
+ * │ ├── vf1 # parent, d_inode->i_private = VFID(1)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * : : : :
+ */
+ snprintf(name, sizeof(name), "tile%u", tile->id);
+ dent = debugfs_create_dir(name, parent);
+ if (IS_ERR(dent))
+ return;
+ dent->d_inode->i_private = tile;
+
+ xe_tile_assert(tile, extract_tile(dent) == tile);
+ xe_tile_assert(tile, extract_vfid(dent) == vfid);
+ xe_tile_assert(tile, extract_xe(dent) == xe);
+
+ pf_populate_tile(tile, dent, vfid);
+}
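To make the config-attribute macro above easier to follow, here is a rough hand expansion of DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(ggtt, ggtt, u64, "%llu\n") — an editorial illustration derived from the macro body, not compiler output:

static int ggtt_set(void *data, u64 val)
{
	struct xe_tile *tile = extract_tile(data);
	unsigned int vfid = extract_vfid(data);
	struct xe_gt *gt = tile->primary_gt;
	struct xe_device *xe = tile->xe;
	int err;

	if (val > (u64)~0ull)	/* can never trip for u64; guards narrower types */
		return -EOVERFLOW;

	xe_pm_runtime_get(xe);
	err = xe_sriov_pf_wait_ready(xe) ?:
	      xe_gt_sriov_pf_config_set_ggtt(gt, vfid, val);
	if (!err)
		xe_sriov_pf_provision_set_custom_mode(xe);
	xe_pm_runtime_put(xe);

	return err;
}

static int ggtt_get(void *data, u64 *val)
{
	struct xe_tile *tile = extract_tile(data);
	unsigned int vfid = extract_vfid(data);
	struct xe_gt *gt = tile->primary_gt;

	*val = xe_gt_sriov_pf_config_get_ggtt(gt, vfid);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(ggtt_fops, ggtt_get, ggtt_set, "%llu\n");

The void *data that debugfs hands to these callbacks is the dentry registered in pf_add_config_attrs(), which is why extract_tile() and extract_vfid() can walk d_parent to recover the tile and the VF number.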
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h
new file mode 100644
index 000000000000..55d179c44634
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_PF_DEBUGFS_H_
+#define _XE_TILE_SRIOV_PF_DEBUGFS_H_
+
+struct dentry;
+struct xe_tile;
+
+void xe_tile_sriov_pf_debugfs_populate(struct xe_tile *tile, struct dentry *parent,
+ unsigned int vfid);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_printk.h b/drivers/gpu/drm/xe/xe_tile_sriov_printk.h
new file mode 100644
index 000000000000..68323512872c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_printk.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_PRINTK_H_
+#define _XE_TILE_SRIOV_PRINTK_H_
+
+#include "xe_tile_printk.h"
+#include "xe_sriov_printk.h"
+
+#define __XE_TILE_SRIOV_PRINTK_FMT(_tile, _fmt, ...) \
+ __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_printk(_tile, _level, _fmt, ...) \
+ xe_sriov_##_level((_tile)->xe, __XE_TILE_SRIOV_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_sriov_err(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, err, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_notice(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, notice, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_info(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, info, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_dbg(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, dbg, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_dbg_verbose(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, dbg_verbose, _fmt, ##__VA_ARGS__)
+
+#endif
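Usage mirrors the existing xe_tile_printk helpers; a hypothetical call site (message text and variables invented for illustration):

	xe_tile_sriov_dbg(tile, "GGTT balloon [%#llx..%#llx) inserted\n", start, end);
	xe_tile_sriov_err(tile, "provisioning failed (%pe)\n", ERR_PTR(err));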
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
index f221dbed16f0..c9bac2cfdd04 100644
--- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
@@ -9,7 +9,6 @@
#include "xe_assert.h"
#include "xe_ggtt.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_tile_sriov_vf.h"
@@ -40,10 +39,10 @@ static int vf_init_ggtt_balloons(struct xe_tile *tile)
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
+static int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
{
- u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt);
- u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt);
+ u64 ggtt_base = tile->sriov.vf.self_config.ggtt_base;
+ u64 ggtt_size = tile->sriov.vf.self_config.ggtt_size;
struct xe_device *xe = tile_to_xe(tile);
u64 wopcm = xe_wopcm_size(xe);
u64 start, end;
@@ -232,7 +231,7 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
*/
/**
- * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
+ * xe_tile_sriov_vf_fixup_ggtt_nodes_locked - Shift GGTT allocations to match assigned range.
* @tile: the &xe_tile struct instance
* @shift: the shift value
*
@@ -240,15 +239,112 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
* within the global space. This range might have changed during migration,
* which requires all memory addresses pointing to GGTT to be shifted.
*/
-void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
+void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift)
{
struct xe_ggtt *ggtt = tile->mem.ggtt;
- mutex_lock(&ggtt->lock);
+ lockdep_assert_held(&ggtt->lock);
xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
xe_ggtt_shift_nodes_locked(ggtt, shift);
xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+}
- mutex_unlock(&ggtt->lock);
+/**
+ * xe_tile_sriov_vf_lmem - VF LMEM configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the LMEM assigned to VF.
+ */
+u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ return config->lmem_size;
+}
+
+/**
+ * xe_tile_sriov_vf_lmem_store - Store VF LMEM configuration
+ * @tile: the &xe_tile
+ * @lmem_size: VF LMEM size to store
+ *
+ * This function is for VF use only.
+ */
+void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ config->lmem_size = lmem_size;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt - VF GGTT configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the GGTT assigned to VF.
+ */
+u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ return config->ggtt_size;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt_store - Store VF GGTT configuration
+ * @tile: the &xe_tile
+ * @ggtt_size: VF GGTT size to store
+ *
+ * This function is for VF use only.
+ */
+void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ config->ggtt_size = ggtt_size;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt_base - VF GGTT base configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: base of the GGTT assigned to VF.
+ */
+u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ return config->ggtt_base;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt_base_store - Store VF GGTT base configuration
+ * @tile: the &xe_tile
+ * @ggtt_base: VF GGTT base to store
+ *
+ * This function is for VF use only.
+ */
+void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_base)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ config->ggtt_base = ggtt_base;
}
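A short sketch of how the new accessor pairs are expected to meet in the middle — the store-side caller (the VF self-config query path) is an assumption; the read side matches the tile_vram_size() change in xe_vram.c later in this patch:

	/* store side - presumably after querying the self-config from GuC */
	xe_tile_sriov_vf_ggtt_base_store(tile, ggtt_base);
	xe_tile_sriov_vf_ggtt_store(tile, ggtt_size);
	xe_tile_sriov_vf_lmem_store(tile, lmem_size);

	/* read side - e.g. when sizing the tile VRAM region */
	*tile_size = xe_tile_sriov_vf_lmem(tile);
	*vram_size = *tile_size;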
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
index 93eb043171e8..749f41504883 100644
--- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
@@ -11,8 +11,13 @@
struct xe_tile;
int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
-int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
-void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
+void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift);
+u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile);
+void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size);
+u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile);
+void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_size);
+u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile);
+void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size);
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
new file mode 100644
index 000000000000..4807ca51614c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_VF_TYPES_H_
+#define _XE_TILE_SRIOV_VF_TYPES_H_
+
+#include <linux/types.h>
+
+/**
+ * struct xe_tile_sriov_vf_selfconfig - VF configuration data.
+ */
+struct xe_tile_sriov_vf_selfconfig {
+ /** @ggtt_base: assigned base offset of the GGTT region. */
+ u64 ggtt_base;
+ /** @ggtt_size: assigned size of the GGTT region. */
+ u64 ggtt_size;
+ /** @lmem_size: assigned size of the LMEM. */
+ u64 lmem_size;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index dc588255674d..e368b2a36bac 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021-2023 Intel Corporation
- * Copyright (C) 2021-2002 Red Hat
+ * Copyright (C) 2021-2022 Red Hat
*/
#include <drm/drm_managed.h>
@@ -24,8 +24,8 @@
#include "xe_sriov.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_vram_mgr.h"
-#include "xe_wa.h"
#include "xe_vram.h"
+#include "xe_wa.h"
struct xe_ttm_stolen_mgr {
struct xe_ttm_vram_mgr base;
@@ -81,7 +81,7 @@ static u32 get_wopcm_size(struct xe_device *xe)
return wopcm_size;
}
-static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+static u64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct xe_vram_region *tile_vram = xe_device_get_root_tile(xe)->mem.vram;
resource_size_t tile_io_start = xe_vram_region_io_start(tile_vram);
@@ -105,6 +105,8 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
return 0;
stolen_size = tile_size - mgr->stolen_base;
+
+ xe_assert(xe, stolen_size > wopcm_size);
stolen_size -= wopcm_size;
/* Verify usage fits in the actual resource available */
diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
index d38b91872da3..3e404eb8d098 100644
--- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021-2022 Intel Corporation
- * Copyright (C) 2021-2002 Red Hat
+ * Copyright (C) 2021-2022 Red Hat
*/
#include "xe_ttm_sys_mgr.h"
@@ -85,7 +85,7 @@ static const struct ttm_resource_manager_func xe_ttm_sys_mgr_func = {
.debug = xe_ttm_sys_mgr_debug
};
-static void ttm_sys_mgr_fini(struct drm_device *drm, void *arg)
+static void xe_ttm_sys_mgr_fini(struct drm_device *drm, void *arg)
{
struct xe_device *xe = (struct xe_device *)arg;
struct ttm_resource_manager *man = &xe->mem.sys_mgr;
@@ -116,5 +116,5 @@ int xe_ttm_sys_mgr_init(struct xe_device *xe)
ttm_resource_manager_init(man, &xe->ttm, gtt_size >> PAGE_SHIFT);
ttm_set_driver_manager(&xe->ttm, XE_PL_TT, man);
ttm_resource_manager_set_used(man, true);
- return drmm_add_action_or_reset(&xe->drm, ttm_sys_mgr_fini, xe);
+ return drmm_add_action_or_reset(&xe->drm, xe_ttm_sys_mgr_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 9175b4a2214b..9f70802fce92 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021-2022 Intel Corporation
- * Copyright (C) 2021-2002 Red Hat
+ * Copyright (C) 2021-2022 Red Hat
*/
#include <drm/drm_managed.h>
@@ -284,7 +284,7 @@ static const struct ttm_resource_manager_func xe_ttm_vram_mgr_func = {
.debug = xe_ttm_vram_mgr_debug
};
-static void ttm_vram_mgr_fini(struct drm_device *dev, void *arg)
+static void xe_ttm_vram_mgr_fini(struct drm_device *dev, void *arg)
{
struct xe_device *xe = to_xe_device(dev);
struct xe_ttm_vram_mgr *mgr = arg;
@@ -335,7 +335,7 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
ttm_set_driver_manager(&xe->ttm, mem_type, &mgr->manager);
ttm_resource_manager_set_used(&mgr->manager, true);
- return drmm_add_action_or_reset(&xe->drm, ttm_vram_mgr_fini, mgr);
+ return drmm_add_action_or_reset(&xe->drm, xe_ttm_vram_mgr_fini, mgr);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index a524170a04d0..7c140d8cb1e0 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -40,7 +40,8 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
{ XE_RTP_NAME("Tuning: Compression Overfetch"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_has_flat_ccs)),
XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX),
SET(CCCHKNREG1, L3CMPCTRL))
},
@@ -58,12 +59,14 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN))
},
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_has_flat_ccs)),
XE_RTP_ACTIONS(SET(L3SQCREG2,
COMPMEMRD256BOVRFETCHEN))
},
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"),
- XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_has_flat_ccs)),
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2,
COMPMEMRD256BOVRFETCHEN))
},
@@ -214,7 +217,14 @@ void xe_tuning_process_lrc(struct xe_hw_engine *hwe)
xe_rtp_process_to_sr(&ctx, lrc_tunings, ARRAY_SIZE(lrc_tunings), &hwe->reg_lrc);
}
-void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_tuning_dump() - Dump GT tuning info into a drm printer.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Return: always 0.
+ */
+int xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
{
size_t idx;
@@ -222,11 +232,15 @@ void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
for_each_set_bit(idx, gt->tuning_active.gt, ARRAY_SIZE(gt_tunings))
drm_printf_indent(p, 1, "%s\n", gt_tunings[idx].name);
- drm_printf(p, "\nEngine Tunings\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "Engine Tunings\n");
for_each_set_bit(idx, gt->tuning_active.engine, ARRAY_SIZE(engine_tunings))
drm_printf_indent(p, 1, "%s\n", engine_tunings[idx].name);
- drm_printf(p, "\nLRC Tunings\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "LRC Tunings\n");
for_each_set_bit(idx, gt->tuning_active.lrc, ARRAY_SIZE(lrc_tunings))
drm_printf_indent(p, 1, "%s\n", lrc_tunings[idx].name);
+
+ return 0;
}
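With an int return, xe_tuning_dump() (and xe_wa_gt_dump() below) can be dropped straight into a drm_info_list table, matching the tile debugfs pattern earlier in the series; the GT-level show helper named here is an assumption used only for illustration:

static const struct drm_info_list gt_debugfs_list[] = {
	/* .data carries the print callback, as in the tile debugfs tables */
	{ "tunings", .show = example_gt_show_with_rpm, .data = xe_tuning_dump },
	{ "workarounds", .show = example_gt_show_with_rpm, .data = xe_wa_gt_dump },
};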
diff --git a/drivers/gpu/drm/xe/xe_tuning.h b/drivers/gpu/drm/xe/xe_tuning.h
index dd0d3ccc9c65..c1cc5927fda7 100644
--- a/drivers/gpu/drm/xe/xe_tuning.h
+++ b/drivers/gpu/drm/xe/xe_tuning.h
@@ -14,6 +14,6 @@ int xe_tuning_init(struct xe_gt *gt);
void xe_tuning_process_gt(struct xe_gt *gt);
void xe_tuning_process_engine(struct xe_hw_engine *hwe);
void xe_tuning_process_lrc(struct xe_hw_engine *hwe);
-void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/xe/xe_userptr.c b/drivers/gpu/drm/xe/xe_userptr.c
index f16e92cd8090..0d9130b1958a 100644
--- a/drivers/gpu/drm/xe/xe_userptr.c
+++ b/drivers/gpu/drm/xe/xe_userptr.c
@@ -3,6 +3,7 @@
* Copyright © 2025 Intel Corporation
*/
+#include "xe_svm.h"
#include "xe_userptr.h"
#include <linux/mm.h>
@@ -54,7 +55,8 @@ int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
struct xe_device *xe = vm->xe;
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
- .device_private_page_owner = NULL,
+ .device_private_page_owner = xe_svm_devm_owner(xe),
+ .allow_mixed = true,
};
lockdep_assert_held(&vm->lock);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 63c65e3d207b..10d77666a425 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -35,6 +35,7 @@
#include "xe_pt.h"
#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_sriov_vf.h"
#include "xe_svm.h"
#include "xe_sync.h"
#include "xe_tile.h"
@@ -111,12 +112,22 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
static int wait_for_existing_preempt_fences(struct xe_vm *vm)
{
struct xe_exec_queue *q;
+ bool vf_migration = IS_SRIOV_VF(vm->xe) &&
+ xe_sriov_vf_migration_supported(vm->xe);
+ signed long wait_time = vf_migration ? HZ / 5 : MAX_SCHEDULE_TIMEOUT;
xe_vm_assert_held(vm);
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
if (q->lr.pfence) {
- long timeout = dma_fence_wait(q->lr.pfence, false);
+ long timeout;
+
+ timeout = dma_fence_wait_timeout(q->lr.pfence, false,
+ wait_time);
+ if (!timeout) {
+ xe_assert(vm->xe, vf_migration);
+ return -EAGAIN;
+ }
/* Only -ETIME on fence indicates VM needs to be killed */
if (timeout < 0 || q->lr.pfence->error == -ETIME)
@@ -466,6 +477,8 @@ static void preempt_rebind_work_func(struct work_struct *w)
retry:
if (!try_wait_for_completion(&vm->xe->pm_block) && vm_suspend_rebind_worker(vm)) {
up_write(&vm->lock);
+		/* We don't actually block, but we don't make progress either. */
+ xe_pm_might_block_on_suspend();
return;
}
@@ -539,6 +552,19 @@ out_unlock:
out_unlock_outer:
if (err == -EAGAIN) {
trace_xe_vm_rebind_worker_retry(vm);
+
+ /*
+		 * We can't block in workers on a VF that supports migration,
+		 * since doing so can prevent the VF post-migration workers
+		 * from getting scheduled.
+ */
+ if (IS_SRIOV_VF(vm->xe) &&
+ xe_sriov_vf_migration_supported(vm->xe)) {
+ up_write(&vm->lock);
+ xe_vm_queue_rebind_worker(vm);
+ return;
+ }
+
goto retry;
}
@@ -1875,6 +1901,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_vm_create *args = data;
+ struct xe_gt *wa_gt = xe_root_mmio_gt(xe);
struct xe_vm *vm;
u32 id;
int err;
@@ -1883,7 +1910,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
- if (XE_GT_WA(xe_root_mmio_gt(xe), 14016763929))
+ if (wa_gt && XE_GT_WA(wa_gt, 22014953428))
args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
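The new preempt-fence wait above relies on the dma_fence_wait_timeout() return convention; a compact recap (sketch, names illustrative):

	long timeout = dma_fence_wait_timeout(fence, false, HZ / 5);

	if (timeout < 0)	/* error from the wait itself */
		return timeout;
	if (timeout == 0)	/* timed out, fence still unsignalled */
		return -EAGAIN;	/* caller retries, e.g. requeues the rebind worker */
	/* timeout > 0: fence signalled, value is the remaining jiffies */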
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index 652df7a5f4f6..b62a96f8ef9e 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -13,13 +13,14 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
+#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt_mcr.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_sriov.h"
+#include "xe_tile_sriov_vf.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram.h"
#include "xe_vram_types.h"
@@ -255,9 +256,9 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
offset = 0;
for_each_tile(t, xe, id)
for_each_if(t->id < tile->id)
- offset += xe_gt_sriov_vf_lmem(t->primary_gt);
+ offset += xe_tile_sriov_vf_lmem(t);
- *tile_size = xe_gt_sriov_vf_lmem(gt);
+ *tile_size = xe_tile_sriov_vf_lmem(tile);
*vram_size = *tile_size;
*tile_offset = offset;
@@ -301,8 +302,11 @@ static void vram_fini(void *arg)
xe->mem.vram->mapping = NULL;
- for_each_tile(tile, xe, id)
+ for_each_tile(tile, xe, id) {
tile->mem.vram->mapping = NULL;
+ if (tile->mem.kernel_vram)
+ tile->mem.kernel_vram->mapping = NULL;
+ }
}
struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement)
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index cd03891654a1..b6dcd9827354 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -684,7 +684,7 @@ static const struct xe_rtp_entry_sr engine_was[] = {
},
{ XE_RTP_NAME("13012615864"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), OR,
- GRAPHICS_VERSION(3003),
+ GRAPHICS_VERSION_RANGE(3003, 3005),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
},
@@ -695,7 +695,7 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
{ XE_RTP_NAME("14021402888"),
- XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3003, 3005), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
{ XE_RTP_NAME("18041344222"),
@@ -913,7 +913,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_AUTOSTRIP))
},
{ XE_RTP_NAME("22021007897"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3003), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
},
};
@@ -1086,7 +1086,14 @@ void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p)
drm_printf_indent(p, 1, "%s\n", device_oob_was[idx].name);
}
-void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_wa_gt_dump() - Dump GT workarounds into a drm printer.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Return: always 0.
+ */
+int xe_wa_gt_dump(struct xe_gt *gt, struct drm_printer *p)
{
size_t idx;
@@ -1094,18 +1101,22 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
for_each_set_bit(idx, gt->wa_active.gt, ARRAY_SIZE(gt_was))
drm_printf_indent(p, 1, "%s\n", gt_was[idx].name);
- drm_printf(p, "\nEngine Workarounds\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "Engine Workarounds\n");
for_each_set_bit(idx, gt->wa_active.engine, ARRAY_SIZE(engine_was))
drm_printf_indent(p, 1, "%s\n", engine_was[idx].name);
- drm_printf(p, "\nLRC Workarounds\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "LRC Workarounds\n");
for_each_set_bit(idx, gt->wa_active.lrc, ARRAY_SIZE(lrc_was))
drm_printf_indent(p, 1, "%s\n", lrc_was[idx].name);
- drm_printf(p, "\nOOB Workarounds\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "OOB Workarounds\n");
for_each_set_bit(idx, gt->wa_active.oob, ARRAY_SIZE(oob_was))
if (oob_was[idx].name)
drm_printf_indent(p, 1, "%s\n", oob_was[idx].name);
+ return 0;
}
/*
@@ -1127,6 +1138,6 @@ void xe_wa_apply_tile_workarounds(struct xe_tile *tile)
if (IS_SRIOV_VF(tile->xe))
return;
- if (XE_GT_WA(tile->primary_gt, 22010954014))
+ if (XE_DEVICE_WA(tile->xe, 22010954014))
xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
}
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index 6a869b2de643..8fd6a5af0910 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -22,7 +22,7 @@ void xe_wa_process_engine(struct xe_hw_engine *hwe);
void xe_wa_process_lrc(struct xe_hw_engine *hwe);
void xe_wa_apply_tile_workarounds(struct xe_tile *tile);
void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p);
-void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_wa_gt_dump(struct xe_gt *gt, struct drm_printer *p);
/**
* XE_GT_WA - Out-of-band GT workarounds, to be queried and called as needed.
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index f3a6d5d239ce..fb38eb3d6e9a 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -11,10 +11,9 @@
18020744125 PLATFORM(PVC)
1509372804 PLATFORM(PVC), GRAPHICS_STEP(A0, C0)
1409600907 GRAPHICS_VERSION_RANGE(1200, 1250)
-14016763929 SUBPLATFORM(DG2, G10)
+22014953428 SUBPLATFORM(DG2, G10)
SUBPLATFORM(DG2, G12)
16017236439 PLATFORM(PVC)
-22010954014 PLATFORM(DG2)
14019821291 MEDIA_VERSION_RANGE(1300, 2000)
14015076503 MEDIA_VERSION(1300)
16020292621 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
@@ -34,18 +33,18 @@
13011645652 GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
GRAPHICS_VERSION(3003)
+ GRAPHICS_VERSION_RANGE(3004, 3005)
14022293748 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
- GRAPHICS_VERSION_RANGE(3000, 3001)
- GRAPHICS_VERSION(3003)
+ GRAPHICS_VERSION_RANGE(3000, 3005)
22019794406 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
GRAPHICS_VERSION(3003)
+ GRAPHICS_VERSION_RANGE(3004, 3005)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
-22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
14019789679 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION_RANGE(1270, 2004)
@@ -63,11 +62,11 @@
16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
MEDIA_VERSION_RANGE(1301, 3000)
MEDIA_VERSION(3002)
- GRAPHICS_VERSION(3003)
+ GRAPHICS_VERSION_RANGE(3003, 3005)
16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
MEDIA_VERSION_RANGE(1300, 3000)
MEDIA_VERSION(3002)
- GRAPHICS_VERSION(3003)
+ GRAPHICS_VERSION_RANGE(3003, 3005)
14020001231 GRAPHICS_VERSION_RANGE(2001,2004), FUNC(xe_rtp_match_psmi_enabled)
MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled)
MEDIA_VERSION(3000), FUNC(xe_rtp_match_psmi_enabled)
@@ -75,9 +74,5 @@
16023683509 MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_psmi_enabled)
-# SoC workaround - currently applies to all platforms with the following
-# primary GT GMDID
-14022085890 GRAPHICS_VERSION(2001)
-
15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
16026007364 MEDIA_VERSION(3000)
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index 2bee0a2275ed..02f3a7d78cf8 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -363,10 +364,12 @@ static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
- unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ int ret;
/* Enforce the alignment constraints of the DMA engine. */
- args->pitch = ALIGN(pitch, dpsub->dma_align);
+ ret = drm_mode_size_dumb(drm, args, dpsub->dma_align, 0);
+ if (ret)
+ return ret;
return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 344cc9e741c1..723a80895cd4 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -471,6 +471,18 @@ static int host1x_device_add(struct host1x *host1x,
mutex_unlock(&clients_lock);
+ /*
+	 * Add the device even if there are no subdevs, to ensure syncpoint
+	 * functionality is available regardless of whether any engine
+	 * subdevices are present.
+ */
+ if (list_empty(&device->subdevs)) {
+ err = device_add(&device->dev);
+ if (err < 0)
+ dev_err(&device->dev, "failed to add device: %d\n", err);
+ else
+ device->registered = true;
+ }
+
return 0;
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 1f93e5e276c0..e365df6af353 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -585,14 +585,8 @@ static int host1x_probe(struct platform_device *pdev)
}
host->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(host->clk)) {
- err = PTR_ERR(host->clk);
-
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get clock: %d\n", err);
-
- return err;
- }
+ if (IS_ERR(host->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(host->clk), "failed to get clock\n");
err = host1x_get_resets(host);
if (err)
@@ -821,6 +815,7 @@ u64 host1x_get_dma_mask(struct host1x *host1x)
}
EXPORT_SYMBOL(host1x_get_dma_mask);
+MODULE_SOFTDEP("post: tegra-drm");
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index d44b8de890be..2df6a16d484e 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -47,24 +47,11 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
}
}
-static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
- u32 next_class)
+static void submit_wait(struct host1x_job *job, u32 id, u32 threshold)
{
struct host1x_cdma *cdma = &job->channel->cdma;
-#if HOST1X_HW >= 6
- u32 stream_id;
-
- /*
- * If a memory context has been set, use it. Otherwise
- * (if context isolation is disabled) use the engine's
- * firmware stream ID.
- */
- if (job->memory_context)
- stream_id = job->memory_context->stream_id;
- else
- stream_id = job->engine_fallback_streamid;
-
+#if HOST1X_HW >= 2
host1x_cdma_push_wide(cdma,
host1x_opcode_setclass(
HOST1X_CLASS_HOST1X,
@@ -76,23 +63,6 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
id,
HOST1X_OPCODE_NOP
);
- host1x_cdma_push_wide(&job->channel->cdma,
- host1x_opcode_setclass(job->class, 0, 0),
- host1x_opcode_setpayload(stream_id),
- host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
- HOST1X_OPCODE_NOP);
-#elif HOST1X_HW >= 2
- host1x_cdma_push_wide(cdma,
- host1x_opcode_setclass(
- HOST1X_CLASS_HOST1X,
- HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32,
- /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */
- BIT(0) | BIT(2)
- ),
- threshold,
- id,
- host1x_opcode_setclass(next_class, 0, 0)
- );
#else
/* TODO add waitchk or use waitbases or other mitigation */
host1x_cdma_push(cdma,
@@ -103,6 +73,32 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
),
host1x_class_host_wait_syncpt(id, threshold)
);
+#endif
+}
+
+static void submit_setclass(struct host1x_job *job, u32 next_class)
+{
+ struct host1x_cdma *cdma = &job->channel->cdma;
+
+#if HOST1X_HW >= 6
+ u32 stream_id;
+
+ /*
+ * If a memory context has been set, use it. Otherwise
+ * (if context isolation is disabled) use the engine's
+ * firmware stream ID.
+ */
+ if (job->memory_context)
+ stream_id = job->memory_context->stream_id;
+ else
+ stream_id = job->engine_fallback_streamid;
+
+ host1x_cdma_push_wide(cdma,
+ host1x_opcode_setclass(next_class, 0, 0),
+ host1x_opcode_setpayload(stream_id),
+ host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
+ HOST1X_OPCODE_NOP);
+#else
host1x_cdma_push(cdma,
host1x_opcode_setclass(next_class, 0, 0),
HOST1X_OPCODE_NOP
@@ -110,7 +106,8 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
#endif
}
-static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
+static void submit_gathers(struct host1x_job *job, struct host1x_job_cmd *cmds, u32 num_cmds,
+ u32 job_syncpt_base)
{
struct host1x_cdma *cdma = &job->channel->cdma;
#if HOST1X_HW < 6
@@ -119,8 +116,8 @@ static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
unsigned int i;
u32 threshold;
- for (i = 0; i < job->num_cmds; i++) {
- struct host1x_job_cmd *cmd = &job->cmds[i];
+ for (i = 0; i < num_cmds; i++) {
+ struct host1x_job_cmd *cmd = &cmds[i];
if (cmd->is_wait) {
if (cmd->wait.relative)
@@ -128,7 +125,8 @@ static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
else
threshold = cmd->wait.threshold;
- submit_wait(job, cmd->wait.id, threshold, cmd->wait.next_class);
+ submit_wait(job, cmd->wait.id, threshold);
+ submit_setclass(job, cmd->wait.next_class);
} else {
struct host1x_job_gather *g = &cmd->gather;
@@ -216,7 +214,34 @@ static void channel_program_cdma(struct host1x_job *job)
#if HOST1X_HW >= 6
u32 fence;
+ int i = 0;
+
+ if (job->num_cmds == 0)
+ goto prefences_done;
+ if (!job->cmds[0].is_wait || job->cmds[0].wait.relative)
+ goto prefences_done;
+
+ /* Enter host1x class with invalid stream ID for prefence waits. */
+ host1x_cdma_push_wide(cdma,
+ host1x_opcode_acquire_mlock(1),
+ host1x_opcode_setclass(1, 0, 0),
+ host1x_opcode_setpayload(0),
+ host1x_opcode_setstreamid(0x1fffff));
+
+ for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_job_cmd *cmd = &job->cmds[i];
+
+ if (!cmd->is_wait || cmd->wait.relative)
+ break;
+
+ submit_wait(job, cmd->wait.id, cmd->wait.threshold);
+ }
+
+ host1x_cdma_push(cdma,
+ HOST1X_OPCODE_NOP,
+ host1x_opcode_release_mlock(1));
+prefences_done:
/* Enter engine class with invalid stream ID. */
host1x_cdma_push_wide(cdma,
host1x_opcode_acquire_mlock(job->class),
@@ -230,11 +255,12 @@ static void channel_program_cdma(struct host1x_job *job)
host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1),
HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) |
HOST1X_UCLASS_INCR_SYNCPT_COND_F(4));
- submit_wait(job, job->syncpt->id, fence, job->class);
+ submit_wait(job, job->syncpt->id, fence);
+ submit_setclass(job, job->class);
/* Submit work. */
job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs);
- submit_gathers(job, job->syncpt_end - job->syncpt_incrs);
+ submit_gathers(job, job->cmds + i, job->num_cmds - i, job->syncpt_end - job->syncpt_incrs);
/* Before releasing MLOCK, ensure engine is idle again. */
fence = host1x_syncpt_incr_max(sp, 1);
@@ -242,7 +268,7 @@ static void channel_program_cdma(struct host1x_job *job)
host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1),
HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) |
HOST1X_UCLASS_INCR_SYNCPT_COND_F(4));
- submit_wait(job, job->syncpt->id, fence, job->class);
+ submit_wait(job, job->syncpt->id, fence);
/* Release MLOCK. */
host1x_cdma_push(cdma,
@@ -272,7 +298,7 @@ static void channel_program_cdma(struct host1x_job *job)
job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs);
- submit_gathers(job, job->syncpt_end - job->syncpt_incrs);
+ submit_gathers(job, job->cmds, job->num_cmds, job->syncpt_end - job->syncpt_incrs);
#endif
}
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index f63d14a57a1d..acc7d82e0585 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -345,8 +345,6 @@ static void syncpt_release(struct kref *ref)
sp->locked = false;
- mutex_lock(&sp->host->syncpt_mutex);
-
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
@@ -369,7 +367,7 @@ void host1x_syncpt_put(struct host1x_syncpt *sp)
if (!sp)
return;
- kref_put(&sp->ref, syncpt_release);
+ kref_put_mutex(&sp->ref, syncpt_release, &sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_put);
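The switch to kref_put_mutex() takes the mutex only when the refcount actually hits zero and then invokes the release callback with the lock held, so the callback is expected to drop the lock itself. A generic sketch of the pattern (types and names invented for illustration):

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj_pool {
	struct mutex lock;
};

struct obj {
	struct kref ref;
	struct obj_pool *pool;
};

static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, ref);
	struct obj_pool *pool = o->pool;

	/* runs with pool->lock held by kref_put_mutex() */
	kfree(o);
	mutex_unlock(&pool->lock);	/* release drops the lock itself */
}

static void obj_put(struct obj *o)
{
	kref_put_mutex(&o->ref, obj_release, &o->pool->lock);
}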
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index a257b739188d..a733f90eca55 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -816,11 +816,11 @@ config FB_I810_I2C
config FB_MATROX
tristate "Matrox acceleration"
depends on FB && PCI
+ depends on FB_TILEBLITTING
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select FB_TILEBLITTING
select FB_MACMODES if PPC_PMAC
help
Say Y here if you have a Matrox Millennium, Matrox Millennium II,
@@ -1050,11 +1050,11 @@ config FB_ATY_BACKLIGHT
config FB_S3
tristate "S3 Trio/Virge support"
depends on FB && PCI && HAS_IOPORT
+ depends on FB_TILEBLITTING
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
select FB_CFB_REV_PIXELS_IN_BYTE
@@ -1256,11 +1256,11 @@ config FB_VOODOO1
config FB_VT8623
tristate "VIA VT8623 support"
depends on FB && PCI && HAS_IOPORT
+ depends on FB_TILEBLITTING
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
select FONT_8x16 if FRAMEBUFFER_CONSOLE
@@ -1294,11 +1294,11 @@ config FB_TRIDENT
config FB_ARK
tristate "ARK 2000PV support"
depends on FB && PCI && HAS_IOPORT
+ depends on FB_TILEBLITTING
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_FOPS
- select FB_TILEBLITTING
select FB_SVGALIB
select VGASTATE
select FONT_8x16 if FRAMEBUFFER_CONSOLE
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index 006638eefa41..8d1993e0b591 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -180,7 +180,7 @@ config FB_BACKLIGHT
depends on FB
config FB_MODE_HELPERS
- bool "Enable Video Mode Handling Helpers"
+ bool
depends on FB
help
This enables functions for handling video modes using the
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index a9ec7f488522..9d2e59796c3e 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -253,10 +253,10 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = DIV_ROUND_UP(vc->vc_font.width, 8), c;
- int y = real_y(ops->p, vc->state.y);
+ int y = real_y(par->p, vc->state.y);
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1;
char *src;
@@ -270,10 +270,10 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
attribute = get_attribute(info, c);
src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
- if (ops->cursor_state.image.data != src ||
- ops->cursor_reset) {
- ops->cursor_state.image.data = src;
- cursor.set |= FB_CUR_SETIMAGE;
+ if (par->cursor_state.image.data != src ||
+ par->cursor_reset) {
+ par->cursor_state.image.data = src;
+ cursor.set |= FB_CUR_SETIMAGE;
}
if (attribute) {
@@ -282,46 +282,46 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
if (!dst)
return;
- kfree(ops->cursor_data);
- ops->cursor_data = dst;
+ kfree(par->cursor_data);
+ par->cursor_data = dst;
update_attr(dst, src, attribute, vc);
src = dst;
}
- if (ops->cursor_state.image.fg_color != fg ||
- ops->cursor_state.image.bg_color != bg ||
- ops->cursor_reset) {
- ops->cursor_state.image.fg_color = fg;
- ops->cursor_state.image.bg_color = bg;
+ if (par->cursor_state.image.fg_color != fg ||
+ par->cursor_state.image.bg_color != bg ||
+ par->cursor_reset) {
+ par->cursor_state.image.fg_color = fg;
+ par->cursor_state.image.bg_color = bg;
cursor.set |= FB_CUR_SETCMAP;
}
- if ((ops->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) ||
- (ops->cursor_state.image.dy != (vc->vc_font.height * y)) ||
- ops->cursor_reset) {
- ops->cursor_state.image.dx = vc->vc_font.width * vc->state.x;
- ops->cursor_state.image.dy = vc->vc_font.height * y;
+ if ((par->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) ||
+ (par->cursor_state.image.dy != (vc->vc_font.height * y)) ||
+ par->cursor_reset) {
+ par->cursor_state.image.dx = vc->vc_font.width * vc->state.x;
+ par->cursor_state.image.dy = vc->vc_font.height * y;
cursor.set |= FB_CUR_SETPOS;
}
- if (ops->cursor_state.image.height != vc->vc_font.height ||
- ops->cursor_state.image.width != vc->vc_font.width ||
- ops->cursor_reset) {
- ops->cursor_state.image.height = vc->vc_font.height;
- ops->cursor_state.image.width = vc->vc_font.width;
+ if (par->cursor_state.image.height != vc->vc_font.height ||
+ par->cursor_state.image.width != vc->vc_font.width ||
+ par->cursor_reset) {
+ par->cursor_state.image.height = vc->vc_font.height;
+ par->cursor_state.image.width = vc->vc_font.width;
cursor.set |= FB_CUR_SETSIZE;
}
- if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
- ops->cursor_reset) {
- ops->cursor_state.hot.x = cursor.hot.y = 0;
+ if (par->cursor_state.hot.x || par->cursor_state.hot.y ||
+ par->cursor_reset) {
+ par->cursor_state.hot.x = cursor.hot.y = 0;
cursor.set |= FB_CUR_SETHOT;
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != ops->p->cursor_shape ||
- ops->cursor_state.mask == NULL ||
- ops->cursor_reset) {
+ vc->vc_cursor_type != par->p->cursor_shape ||
+ par->cursor_state.mask == NULL ||
+ par->cursor_reset) {
char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
int cur_height, size, i = 0;
u8 msk = 0xff;
@@ -329,13 +329,13 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (!mask)
return;
- kfree(ops->cursor_state.mask);
- ops->cursor_state.mask = mask;
+ kfree(par->cursor_state.mask);
+ par->cursor_state.mask = mask;
- ops->p->cursor_shape = vc->vc_cursor_type;
+ par->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (CUR_SIZE(ops->p->cursor_shape)) {
+ switch (CUR_SIZE(par->p->cursor_shape)) {
case CUR_NONE:
cur_height = 0;
break;
@@ -364,19 +364,19 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
mask[i++] = msk;
}
- ops->cursor_state.enable = enable && !use_sw;
+ par->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
- cursor.image.fg_color = ops->cursor_state.image.fg_color;
- cursor.image.bg_color = ops->cursor_state.image.bg_color;
- cursor.image.dx = ops->cursor_state.image.dx;
- cursor.image.dy = ops->cursor_state.image.dy;
- cursor.image.height = ops->cursor_state.image.height;
- cursor.image.width = ops->cursor_state.image.width;
- cursor.hot.x = ops->cursor_state.hot.x;
- cursor.hot.y = ops->cursor_state.hot.y;
- cursor.mask = ops->cursor_state.mask;
- cursor.enable = ops->cursor_state.enable;
+ cursor.image.fg_color = par->cursor_state.image.fg_color;
+ cursor.image.bg_color = par->cursor_state.image.bg_color;
+ cursor.image.dx = par->cursor_state.image.dx;
+ cursor.image.dy = par->cursor_state.image.dy;
+ cursor.image.height = par->cursor_state.image.height;
+ cursor.image.width = par->cursor_state.image.width;
+ cursor.hot.x = par->cursor_state.hot.x;
+ cursor.hot.y = par->cursor_state.hot.y;
+ cursor.mask = par->cursor_state.mask;
+ cursor.enable = par->cursor_state.enable;
cursor.image.depth = 1;
cursor.rop = ROP_XOR;
@@ -386,31 +386,31 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (err)
soft_cursor(info, &cursor);
- ops->cursor_reset = 0;
+ par->cursor_reset = 0;
}
static int bit_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int err;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
-void fbcon_set_bitops(struct fbcon_ops *ops)
+static const struct fbcon_bitops bit_fbcon_bitops = {
+ .bmove = bit_bmove,
+ .clear = bit_clear,
+ .putcs = bit_putcs,
+ .clear_margins = bit_clear_margins,
+ .cursor = bit_cursor,
+ .update_start = bit_update_start,
+};
+
+void fbcon_set_bitops_ur(struct fbcon_par *par)
{
- ops->bmove = bit_bmove;
- ops->clear = bit_clear;
- ops->putcs = bit_putcs;
- ops->clear_margins = bit_clear_margins;
- ops->cursor = bit_cursor;
- ops->update_start = bit_update_start;
- ops->rotate_font = NULL;
-
- if (ops->rotate)
- fbcon_set_rotate(ops);
+ par->bitops = &bit_fbcon_bitops;
}
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 96cc9b389246..7f35ad66b462 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -81,6 +81,7 @@
#include <asm/irq.h>
#include "fbcon.h"
+#include "fbcon_rotate.h"
#include "fb_internal.h"
/*
@@ -198,27 +199,27 @@ static struct device *fbcon_device;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION
static inline void fbcon_set_rotation(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (!(info->flags & FBINFO_MISC_TILEBLITTING) &&
- ops->p->con_rotate < 4)
- ops->rotate = ops->p->con_rotate;
+ par->p->con_rotate < 4)
+ par->rotate = par->p->con_rotate;
else
- ops->rotate = 0;
+ par->rotate = 0;
}
static void fbcon_rotate(struct fb_info *info, u32 rotate)
{
- struct fbcon_ops *ops= info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_info *fb_info;
- if (!ops || ops->currcon == -1)
+ if (!par || par->currcon == -1)
return;
- fb_info = fbcon_info_from_console(ops->currcon);
+ fb_info = fbcon_info_from_console(par->currcon);
if (info == fb_info) {
- struct fbcon_display *p = &fb_display[ops->currcon];
+ struct fbcon_display *p = &fb_display[par->currcon];
if (rotate < 4)
p->con_rotate = rotate;
@@ -231,12 +232,12 @@ static void fbcon_rotate(struct fb_info *info, u32 rotate)
static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
struct fbcon_display *p;
int i;
- if (!ops || ops->currcon < 0 || rotate > 3)
+ if (!par || par->currcon < 0 || rotate > 3)
return;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -254,9 +255,9 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
#else
static inline void fbcon_set_rotation(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->rotate = FB_ROTATE_UR;
+ par->rotate = FB_ROTATE_UR;
}
static void fbcon_rotate(struct fb_info *info, u32 rotate)
@@ -270,11 +271,31 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
}
#endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */
+static void fbcon_set_bitops(struct fbcon_par *par)
+{
+ switch (par->rotate) {
+ default:
+ fallthrough;
+ case FB_ROTATE_UR:
+ fbcon_set_bitops_ur(par);
+ break;
+ case FB_ROTATE_CW:
+ fbcon_set_bitops_cw(par);
+ break;
+ case FB_ROTATE_UD:
+ fbcon_set_bitops_ud(par);
+ break;
+ case FB_ROTATE_CCW:
+ fbcon_set_bitops_ccw(par);
+ break;
+ }
+}
+
static int fbcon_get_rotate(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- return (ops) ? ops->rotate : 0;
+ return (par) ? par->rotate : 0;
}
static bool fbcon_skip_panic(struct fb_info *info)
@@ -284,10 +305,10 @@ static bool fbcon_skip_panic(struct fb_info *info)
static inline bool fbcon_is_active(struct vc_data *vc, struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
return info->state == FBINFO_STATE_RUNNING &&
- vc->vc_mode == KD_TEXT && !ops->graphics && !fbcon_skip_panic(info);
+ vc->vc_mode == KD_TEXT && !par->graphics && !fbcon_skip_panic(info);
}
static int get_color(struct vc_data *vc, struct fb_info *info,
@@ -369,7 +390,7 @@ static int get_bg_color(struct vc_data *vc, struct fb_info *info, u16 c)
static void fb_flashcursor(struct work_struct *work)
{
- struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work);
+ struct fbcon_par *par = container_of(work, struct fbcon_par, cursor_work.work);
struct fb_info *info;
struct vc_data *vc = NULL;
int c;
@@ -384,10 +405,10 @@ static void fb_flashcursor(struct work_struct *work)
return;
/* protected by console_lock */
- info = ops->info;
+ info = par->info;
- if (ops->currcon != -1)
- vc = vc_cons[ops->currcon].d;
+ if (par->currcon != -1)
+ vc = vc_cons[par->currcon].d;
if (!vc || !con_is_visible(vc) ||
fbcon_info_from_console(vc->vc_num) != info ||
@@ -397,30 +418,30 @@ static void fb_flashcursor(struct work_struct *work)
}
c = scr_readw((u16 *) vc->vc_pos);
- enable = ops->cursor_flash && !ops->cursor_state.enable;
- ops->cursor(vc, info, enable,
- get_fg_color(vc, info, c),
- get_bg_color(vc, info, c));
+ enable = par->cursor_flash && !par->cursor_state.enable;
+ par->bitops->cursor(vc, info, enable,
+ get_fg_color(vc, info, c),
+ get_bg_color(vc, info, c));
console_unlock();
- queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
- ops->cur_blink_jiffies);
+ queue_delayed_work(system_power_efficient_wq, &par->cursor_work,
+ par->cur_blink_jiffies);
}
static void fbcon_add_cursor_work(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (fbcon_cursor_blink)
- queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
- ops->cur_blink_jiffies);
+ queue_delayed_work(system_power_efficient_wq, &par->cursor_work,
+ par->cur_blink_jiffies);
}
static void fbcon_del_cursor_work(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- cancel_delayed_work_sync(&ops->cursor_work);
+ cancel_delayed_work_sync(&par->cursor_work);
}
#ifndef MODULE
@@ -580,7 +601,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
int cols, int rows, int new_cols, int new_rows)
{
/* Need to make room for the logo */
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int cnt, erase = vc->vc_video_erase_char, step;
unsigned short *save = NULL, *r, *q;
int logo_height;
@@ -596,7 +617,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
*/
if (fb_get_color_depth(&info->var, &info->fix) == 1)
erase &= ~0x400;
- logo_height = fb_prepare_logo(info, ops->rotate);
+ logo_height = fb_prepare_logo(info, par->rotate);
logo_lines = DIV_ROUND_UP(logo_height, vc->vc_font.height);
q = (unsigned short *) (vc->vc_origin +
vc->vc_size_row * rows);
@@ -668,15 +689,15 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
#ifdef CONFIG_FB_TILEBLITTING
static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->p = &fb_display[vc->vc_num];
+ par->p = &fb_display[vc->vc_num];
if ((info->flags & FBINFO_MISC_TILEBLITTING))
fbcon_set_tileops(vc, info);
else {
fbcon_set_rotation(info);
- fbcon_set_bitops(ops);
+ fbcon_set_bitops(par);
}
}
@@ -693,12 +714,12 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
#else
static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
info->flags &= ~FBINFO_MISC_TILEBLITTING;
- ops->p = &fb_display[vc->vc_num];
+ par->p = &fb_display[vc->vc_num];
fbcon_set_rotation(info);
- fbcon_set_bitops(ops);
+ fbcon_set_bitops(par);
}
static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
@@ -718,13 +739,13 @@ static void fbcon_release(struct fb_info *info)
module_put(info->fbops->owner);
if (info->fbcon_par) {
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
fbcon_del_cursor_work(info);
- kfree(ops->cursor_state.mask);
- kfree(ops->cursor_data);
- kfree(ops->cursor_src);
- kfree(ops->fontbuffer);
+ kfree(par->cursor_state.mask);
+ kfree(par->cursor_data);
+ kfree(par->cursor_src);
+ kfree(par->fontbuffer);
kfree(info->fbcon_par);
info->fbcon_par = NULL;
}
@@ -732,7 +753,7 @@ static void fbcon_release(struct fb_info *info)
static int fbcon_open(struct fb_info *info)
{
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
if (!try_module_get(info->fbops->owner))
return -ENODEV;
@@ -746,16 +767,16 @@ static int fbcon_open(struct fb_info *info)
}
unlock_fb_info(info);
- ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
- if (!ops) {
+ par = kzalloc(sizeof(*par), GFP_KERNEL);
+ if (!par) {
fbcon_release(info);
return -ENOMEM;
}
- INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor);
- ops->info = info;
- info->fbcon_par = ops;
- ops->cur_blink_jiffies = HZ / 5;
+ INIT_DELAYED_WORK(&par->cursor_work, fb_flashcursor);
+ par->info = info;
+ info->fbcon_par = par;
+ par->cur_blink_jiffies = HZ / 5;
return 0;
}
@@ -802,12 +823,12 @@ static void con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
int unit, int show_logo)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int ret;
- ops->currcon = fg_console;
+ par->currcon = fg_console;
- if (info->fbops->fb_set_par && !ops->initialized) {
+ if (info->fbops->fb_set_par && !par->initialized) {
ret = info->fbops->fb_set_par(info);
if (ret)
@@ -816,8 +837,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
"error code %d\n", ret);
}
- ops->initialized = true;
- ops->graphics = 0;
+ par->initialized = true;
+ par->graphics = 0;
fbcon_set_disp(info, &info->var, unit);
if (show_logo) {
@@ -954,7 +975,7 @@ static const char *fbcon_startup(void)
struct vc_data *vc = vc_cons[fg_console].d;
const struct font_desc *font = NULL;
struct fb_info *info = NULL;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
int rows, cols;
/*
@@ -974,10 +995,10 @@ static const char *fbcon_startup(void)
if (fbcon_open(info))
return NULL;
- ops = info->fbcon_par;
- ops->currcon = -1;
- ops->graphics = 1;
- ops->cur_rotate = -1;
+ par = info->fbcon_par;
+ par->currcon = -1;
+ par->graphics = 1;
+ par->cur_rotate = -1;
p->con_rotate = initial_rotation;
if (p->con_rotate == -1)
@@ -1000,8 +1021,8 @@ static const char *fbcon_startup(void)
vc->vc_font.charcount = font->charcount;
}
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= vc->vc_font.width;
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
@@ -1019,7 +1040,7 @@ static const char *fbcon_startup(void)
static void fbcon_init(struct vc_data *vc, bool init)
{
struct fb_info *info;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
struct vc_data **default_mode = vc->vc_display_fg;
struct vc_data *svc = *default_mode;
struct fbcon_display *t, *p = &fb_display[vc->vc_num];
@@ -1093,8 +1114,8 @@ static void fbcon_init(struct vc_data *vc, bool init)
if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
- ops = info->fbcon_par;
- ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+ par = info->fbcon_par;
+ par->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
p->con_rotate = initial_rotation;
if (p->con_rotate == -1)
@@ -1106,8 +1127,8 @@ static void fbcon_init(struct vc_data *vc, bool init)
cols = vc->vc_cols;
rows = vc->vc_rows;
- new_cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ new_cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ new_rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
new_cols /= vc->vc_font.width;
new_rows /= vc->vc_font.height;
@@ -1119,7 +1140,7 @@ static void fbcon_init(struct vc_data *vc, bool init)
* We need to do it in fbcon_init() to prevent screen corruption.
*/
if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
- if (info->fbops->fb_set_par && !ops->initialized) {
+ if (info->fbops->fb_set_par && !par->initialized) {
ret = info->fbops->fb_set_par(info);
if (ret)
@@ -1128,10 +1149,10 @@ static void fbcon_init(struct vc_data *vc, bool init)
"error code %d\n", ret);
}
- ops->initialized = true;
+ par->initialized = true;
}
- ops->graphics = 0;
+ par->graphics = 0;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
if ((info->flags & FBINFO_HWACCEL_COPYAREA) &&
@@ -1155,12 +1176,12 @@ static void fbcon_init(struct vc_data *vc, bool init)
if (logo)
fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
- if (ops->rotate_font && ops->rotate_font(info, vc)) {
- ops->rotate = FB_ROTATE_UR;
+ if (par->bitops->rotate_font && par->bitops->rotate_font(info, vc)) {
+ par->rotate = FB_ROTATE_UR;
set_blitting_type(vc, info);
}
- ops->p = &fb_display[fg_console];
+ par->p = &fb_display[fg_console];
}
static void fbcon_free_font(struct fbcon_display *p)
@@ -1198,7 +1219,7 @@ static void fbcon_deinit(struct vc_data *vc)
{
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_info *info;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
int idx;
fbcon_free_font(p);
@@ -1212,15 +1233,15 @@ static void fbcon_deinit(struct vc_data *vc)
if (!info)
goto finished;
- ops = info->fbcon_par;
+ par = info->fbcon_par;
- if (!ops)
+ if (!par)
goto finished;
if (con_is_visible(vc))
fbcon_del_cursor_work(info);
- ops->initialized = false;
+ par->initialized = false;
finished:
fbcon_free_font(p);
@@ -1267,7 +1288,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
unsigned int height, unsigned int width)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int fg, bg;
struct fbcon_display *p = &fb_display[vc->vc_num];
u_int y_break;
@@ -1282,7 +1303,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
vc->vc_top = 0;
/*
* If the font dimensions are not an integral of the display
- * dimensions then the ops->clear below won't end up clearing
+ * dimensions then the par->bitops->clear below won't end up clearing
* the margins. Call clear_margins here in case the logo
* bitmap stretched into the margin area.
*/
@@ -1296,11 +1317,11 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
y_break = p->vrows - p->yscroll;
if (sy < y_break && sy + height - 1 >= y_break) {
u_int b = y_break - sy;
- ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg);
- ops->clear(vc, info, real_y(p, sy + b), sx, height - b,
- width, fg, bg);
+ par->bitops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg);
+ par->bitops->clear(vc, info, real_y(p, sy + b), sx, height - b,
+ width, fg, bg);
} else
- ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg);
+ par->bitops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg);
}
static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
@@ -1314,30 +1335,30 @@ static void fbcon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (fbcon_is_active(vc, info))
- ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
- get_fg_color(vc, info, scr_readw(s)),
- get_bg_color(vc, info, scr_readw(s)));
+ par->bitops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+ get_fg_color(vc, info, scr_readw(s)),
+ get_bg_color(vc, info, scr_readw(s)));
}
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (fbcon_is_active(vc, info))
- ops->clear_margins(vc, info, margin_color, bottom_only);
+ par->bitops->clear_margins(vc, info, margin_color, bottom_only);
}
static void fbcon_cursor(struct vc_data *vc, bool enable)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int c = scr_readw((u16 *) vc->vc_pos);
- ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+ par->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
if (!fbcon_is_active(vc, info) || vc->vc_deccm != 1)
return;
@@ -1347,14 +1368,14 @@ static void fbcon_cursor(struct vc_data *vc, bool enable)
else
fbcon_add_cursor_work(info);
- ops->cursor_flash = enable;
+ par->cursor_flash = enable;
- if (!ops->cursor)
+ if (!par->bitops->cursor)
return;
- ops->cursor(vc, info, enable,
- get_fg_color(vc, info, c),
- get_bg_color(vc, info, c));
+ par->bitops->cursor(vc, info, enable,
+ get_fg_color(vc, info, c),
+ get_bg_color(vc, info, c));
}
static int scrollback_phys_max = 0;
@@ -1367,7 +1388,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
struct fbcon_display *p, *t;
struct vc_data **default_mode, *vc;
struct vc_data *svc;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int rows, cols;
unsigned long ret = 0;
@@ -1400,7 +1421,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
var->yoffset = info->var.yoffset;
var->xoffset = info->var.xoffset;
fb_set_var(info, var);
- ops->var = info->var;
+ par->var = info->var;
vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1);
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
if (vc->vc_font.charcount == 256) {
@@ -1416,8 +1437,8 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= vc->vc_font.width;
rows /= vc->vc_font.height;
ret = vc_resize(vc, cols, rows);
@@ -1429,16 +1450,16 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
static __inline__ void ywrap_up(struct vc_data *vc, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll += count;
if (p->yscroll >= p->vrows) /* Deal with wrap */
p->yscroll -= p->vrows;
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode |= FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode |= FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
scrollback_max += count;
if (scrollback_max > scrollback_phys_max)
scrollback_max = scrollback_phys_max;
@@ -1448,16 +1469,16 @@ static __inline__ void ywrap_up(struct vc_data *vc, int count)
static __inline__ void ywrap_down(struct vc_data *vc, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll -= count;
if (p->yscroll < 0) /* Deal with wrap */
p->yscroll += p->vrows;
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode |= FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode |= FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
scrollback_max -= count;
if (scrollback_max < 0)
scrollback_max = 0;
@@ -1468,19 +1489,19 @@ static __inline__ void ypan_up(struct vc_data *vc, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
p->yscroll += count;
if (p->yscroll > p->vrows - vc->vc_rows) {
- ops->bmove(vc, info, p->vrows - vc->vc_rows,
- 0, 0, 0, vc->vc_rows, vc->vc_cols);
+ par->bitops->bmove(vc, info, p->vrows - vc->vc_rows,
+ 0, 0, 0, vc->vc_rows, vc->vc_cols);
p->yscroll -= p->vrows - vc->vc_rows;
}
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode &= ~FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
fbcon_clear_margins(vc, 1);
scrollback_max += count;
if (scrollback_max > scrollback_phys_max)
@@ -1491,7 +1512,7 @@ static __inline__ void ypan_up(struct vc_data *vc, int count)
static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll += count;
@@ -1501,10 +1522,10 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
}
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode &= ~FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
fbcon_clear_margins(vc, 1);
scrollback_max += count;
if (scrollback_max > scrollback_phys_max)
@@ -1516,19 +1537,19 @@ static __inline__ void ypan_down(struct vc_data *vc, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
p->yscroll -= count;
if (p->yscroll < 0) {
- ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
- 0, vc->vc_rows, vc->vc_cols);
+ par->bitops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
+ 0, vc->vc_rows, vc->vc_cols);
p->yscroll += p->vrows - vc->vc_rows;
}
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode &= ~FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
fbcon_clear_margins(vc, 1);
scrollback_max -= count;
if (scrollback_max < 0)
@@ -1539,7 +1560,7 @@ static __inline__ void ypan_down(struct vc_data *vc, int count)
static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll -= count;
@@ -1549,10 +1570,10 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
}
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode &= ~FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
fbcon_clear_margins(vc, 1);
scrollback_max -= count;
if (scrollback_max < 0)
@@ -1601,7 +1622,7 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
unsigned short *d = (unsigned short *)
(vc->vc_origin + vc->vc_size_row * line);
unsigned short *s = d + offset;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
while (count--) {
unsigned short *start = s;
@@ -1614,8 +1635,8 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
if (c == scr_readw(d)) {
if (s > start) {
- ops->bmove(vc, info, line + ycount, x,
- line, x, 1, s-start);
+ par->bitops->bmove(vc, info, line + ycount, x,
+ line, x, 1, s - start);
x += s - start + 1;
start = s + 1;
} else {
@@ -1630,8 +1651,8 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
d++;
} while (s < le);
if (s > start)
- ops->bmove(vc, info, line + ycount, x, line, x, 1,
- s-start);
+ par->bitops->bmove(vc, info, line + ycount, x, line, x, 1,
+ s - start);
console_conditional_schedule();
if (ycount > 0)
line++;
@@ -1702,7 +1723,7 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy,
int dy, int dx, int height, int width, u_int y_break)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u_int b;
if (sy < y_break && sy + height > y_break) {
@@ -1736,8 +1757,8 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy,
}
return;
}
- ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
- height, width);
+ par->bitops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
}
static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
@@ -1964,15 +1985,13 @@ static void updatescrollmode_accel(struct fbcon_display *p,
struct vc_data *vc)
{
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int cap = info->flags;
u16 t = 0;
- int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
- info->fix.xpanstep);
- int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
- int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
- info->var.xres_virtual);
+ int ypan = FBCON_SWAP(par->rotate, info->fix.ypanstep, info->fix.xpanstep);
+ int ywrap = FBCON_SWAP(par->rotate, info->fix.ywrapstep, t);
+ int yres = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
+ int vyres = FBCON_SWAP(par->rotate, info->var.yres_virtual, info->var.xres_virtual);
int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
divides(ypan, vc->vc_font.height) && vyres > yres;
int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
@@ -2005,11 +2024,10 @@ static void updatescrollmode(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int fh = vc->vc_font.height;
- int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
- info->var.xres_virtual);
+ int yres = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
+ int vyres = FBCON_SWAP(par->rotate, info->var.yres_virtual, info->var.xres_virtual);
p->vrows = vyres/fh;
if (yres > (fh * (vc->vc_rows + 1)))
@@ -2028,7 +2046,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
unsigned int height, bool from_user)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var = info->var;
int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
@@ -2051,12 +2069,10 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
return -EINVAL;
}
- virt_w = FBCON_SWAP(ops->rotate, width, height);
- virt_h = FBCON_SWAP(ops->rotate, height, width);
- virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
- vc->vc_font.height);
- virt_fh = FBCON_SWAP(ops->rotate, vc->vc_font.height,
- vc->vc_font.width);
+ virt_w = FBCON_SWAP(par->rotate, width, height);
+ virt_h = FBCON_SWAP(par->rotate, height, width);
+ virt_fw = FBCON_SWAP(par->rotate, vc->vc_font.width, vc->vc_font.height);
+ virt_fh = FBCON_SWAP(par->rotate, vc->vc_font.height, vc->vc_font.width);
var.xres = virt_w * virt_fw;
var.yres = virt_h * virt_fh;
x_diff = info->var.xres - var.xres;
@@ -2082,7 +2098,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
fb_set_var(info, &var);
}
var_to_display(p, &info->var, info);
- ops->var = info->var;
+ par->var = info->var;
}
updatescrollmode(p, info, vc);
return 0;
@@ -2091,13 +2107,13 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
static bool fbcon_switch(struct vc_data *vc)
{
struct fb_info *info, *old_info = NULL;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var;
int i, ret, prev_console;
info = fbcon_info_from_console(vc->vc_num);
- ops = info->fbcon_par;
+ par = info->fbcon_par;
if (logo_shown >= 0) {
struct vc_data *conp2 = vc_cons[logo_shown].d;
@@ -2108,7 +2124,7 @@ static bool fbcon_switch(struct vc_data *vc)
logo_shown = FBCON_LOGO_CANSHOW;
}
- prev_console = ops->currcon;
+ prev_console = par->currcon;
if (prev_console != -1)
old_info = fbcon_info_from_console(prev_console);
/*
@@ -2121,9 +2137,9 @@ static bool fbcon_switch(struct vc_data *vc)
*/
fbcon_for_each_registered_fb(i) {
if (fbcon_registered_fb[i]->fbcon_par) {
- struct fbcon_ops *o = fbcon_registered_fb[i]->fbcon_par;
+ struct fbcon_par *par = fbcon_registered_fb[i]->fbcon_par;
- o->currcon = vc->vc_num;
+ par->currcon = vc->vc_num;
}
}
memset(&var, 0, sizeof(struct fb_var_screeninfo));
@@ -2137,7 +2153,7 @@ static bool fbcon_switch(struct vc_data *vc)
info->var.activate = var.activate;
var.vmode |= info->var.vmode & ~FB_VMODE_MASK;
fb_set_var(info, &var);
- ops->var = info->var;
+ par->var = info->var;
if (old_info != NULL && (old_info != info ||
info->flags & FBINFO_MISC_ALWAYS_SETPAR)) {
@@ -2154,17 +2170,16 @@ static bool fbcon_switch(struct vc_data *vc)
fbcon_del_cursor_work(old_info);
}
- if (!fbcon_is_active(vc, info) ||
- ops->blank_state != FB_BLANK_UNBLANK)
+ if (!fbcon_is_active(vc, info) || par->blank_state != FB_BLANK_UNBLANK)
fbcon_del_cursor_work(info);
else
fbcon_add_cursor_work(info);
set_blitting_type(vc, info);
- ops->cursor_reset = 1;
+ par->cursor_reset = 1;
- if (ops->rotate_font && ops->rotate_font(info, vc)) {
- ops->rotate = FB_ROTATE_UR;
+ if (par->bitops->rotate_font && par->bitops->rotate_font(info, vc)) {
+ par->rotate = FB_ROTATE_UR;
set_blitting_type(vc, info);
}
@@ -2195,8 +2210,8 @@ static bool fbcon_switch(struct vc_data *vc)
scrollback_current = 0;
if (fbcon_is_active(vc, info)) {
- ops->var.xoffset = ops->var.yoffset = p->yscroll = 0;
- ops->update_start(info);
+ par->var.xoffset = par->var.yoffset = p->yscroll = 0;
+ par->bitops->update_start(info);
}
fbcon_set_palette(vc, color_table);
@@ -2205,7 +2220,7 @@ static bool fbcon_switch(struct vc_data *vc)
if (logo_shown == FBCON_LOGO_DRAW) {
logo_shown = fg_console;
- fb_show_logo(info, ops->rotate);
+ fb_show_logo(info, par->rotate);
update_region(vc,
vc->vc_origin + vc->vc_size_row * vc->vc_top,
vc->vc_size_row * (vc->vc_bottom -
@@ -2234,27 +2249,27 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
bool mode_switch)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (mode_switch) {
struct fb_var_screeninfo var = info->var;
- ops->graphics = 1;
+ par->graphics = 1;
if (!blank) {
var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE |
FB_ACTIVATE_KD_TEXT;
fb_set_var(info, &var);
- ops->graphics = 0;
- ops->var = info->var;
+ par->graphics = 0;
+ par->var = info->var;
}
}
if (fbcon_is_active(vc, info)) {
- if (ops->blank_state != blank) {
- ops->blank_state = blank;
+ if (par->blank_state != blank) {
+ par->blank_state = blank;
fbcon_cursor(vc, !blank);
- ops->cursor_flash = (!blank);
+ par->cursor_flash = (!blank);
if (fb_blank(info, blank))
fbcon_generic_blank(vc, info, blank);
@@ -2264,8 +2279,7 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
update_screen(vc);
}
- if (mode_switch || !fbcon_is_active(vc, info) ||
- ops->blank_state != FB_BLANK_UNBLANK)
+ if (mode_switch || !fbcon_is_active(vc, info) || par->blank_state != FB_BLANK_UNBLANK)
fbcon_del_cursor_work(info);
else
fbcon_add_cursor_work(info);
@@ -2276,10 +2290,10 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
static void fbcon_debug_enter(struct vc_data *vc)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->save_graphics = ops->graphics;
- ops->graphics = 0;
+ par->save_graphics = par->graphics;
+ par->graphics = 0;
if (info->fbops->fb_debug_enter)
info->fbops->fb_debug_enter(info);
fbcon_set_palette(vc, color_table);
@@ -2288,9 +2302,9 @@ static void fbcon_debug_enter(struct vc_data *vc)
static void fbcon_debug_leave(struct vc_data *vc)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->graphics = ops->save_graphics;
+ par->graphics = par->save_graphics;
if (info->fbops->fb_debug_leave)
info->fbops->fb_debug_leave(info);
}
@@ -2425,7 +2439,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
const u8 * data, int userfont)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize, ret, old_userfont, old_width, old_height, old_charcount;
u8 *old_data = vc->vc_font.data;
@@ -2451,8 +2465,8 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
if (resize) {
int cols, rows;
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= w;
rows /= h;
ret = vc_resize(vc, cols, rows);
@@ -2651,11 +2665,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
void fbcon_suspended(struct fb_info *info)
{
struct vc_data *vc = NULL;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
- vc = vc_cons[ops->currcon].d;
+ vc = vc_cons[par->currcon].d;
/* Clear cursor, restore saved data */
fbcon_cursor(vc, false);
@@ -2664,27 +2678,27 @@ void fbcon_suspended(struct fb_info *info)
void fbcon_resumed(struct fb_info *info)
{
struct vc_data *vc;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
- vc = vc_cons[ops->currcon].d;
+ vc = vc_cons[par->currcon].d;
update_screen(vc);
}
static void fbcon_modechanged(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
struct fbcon_display *p;
int rows, cols;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
- vc = vc_cons[ops->currcon].d;
+ vc = vc_cons[par->currcon].d;
if (vc->vc_mode != KD_TEXT ||
- fbcon_info_from_console(ops->currcon) != info)
+ fbcon_info_from_console(par->currcon) != info)
return;
p = &fb_display[vc->vc_num];
@@ -2692,8 +2706,8 @@ static void fbcon_modechanged(struct fb_info *info)
if (con_is_visible(vc)) {
var_to_display(p, &info->var, info);
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= vc->vc_font.width;
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
@@ -2702,8 +2716,8 @@ static void fbcon_modechanged(struct fb_info *info)
scrollback_current = 0;
if (fbcon_is_active(vc, info)) {
- ops->var.xoffset = ops->var.yoffset = p->yscroll = 0;
- ops->update_start(info);
+ par->var.xoffset = par->var.yoffset = p->yscroll = 0;
+ par->bitops->update_start(info);
}
fbcon_set_palette(vc, color_table);
@@ -2713,12 +2727,12 @@ static void fbcon_modechanged(struct fb_info *info)
static void fbcon_set_all_vcs(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
struct fbcon_display *p;
int i, rows, cols, fg = -1;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -2735,8 +2749,8 @@ static void fbcon_set_all_vcs(struct fb_info *info)
p = &fb_display[vc->vc_num];
set_blitting_type(vc, info);
var_to_display(p, &info->var, info);
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= vc->vc_font.width;
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
@@ -2759,13 +2773,13 @@ EXPORT_SYMBOL(fbcon_update_vcs);
/* let fbcon check if it supports a new screen resolution */
int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
unsigned int i;
WARN_CONSOLE_UNLOCKED();
- if (!ops)
+ if (!par)
return 0;
/* prevent setting a screen size which is smaller than font size */
@@ -3037,15 +3051,14 @@ int fbcon_fb_registered(struct fb_info *info)
void fbcon_fb_blanked(struct fb_info *info, int blank)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
- vc = vc_cons[ops->currcon].d;
- if (vc->vc_mode != KD_TEXT ||
- fbcon_info_from_console(ops->currcon) != info)
+ vc = vc_cons[par->currcon].d;
+ if (vc->vc_mode != KD_TEXT || fbcon_info_from_console(par->currcon) != info)
return;
if (con_is_visible(vc)) {
@@ -3054,7 +3067,7 @@ void fbcon_fb_blanked(struct fb_info *info, int blank)
else
do_unblank_screen(0);
}
- ops->blank_state = blank;
+ par->blank_state = blank;
}
void fbcon_new_modelist(struct fb_info *info)
@@ -3244,7 +3257,7 @@ static ssize_t cursor_blink_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct fb_info *info;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
int idx, blink = -1;
console_lock();
@@ -3254,12 +3267,12 @@ static ssize_t cursor_blink_show(struct device *device,
goto err;
info = fbcon_registered_fb[idx];
- ops = info->fbcon_par;
+ par = info->fbcon_par;
- if (!ops)
+ if (!par)
goto err;
- blink = delayed_work_pending(&ops->cursor_work);
+ blink = delayed_work_pending(&par->cursor_work);
err:
console_unlock();
return sysfs_emit(buf, "%d\n", blink);
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 4d97e6d8a16a..44ea4ae4bba0 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -51,7 +51,7 @@ struct fbcon_display {
const struct fb_videomode *mode;
};
-struct fbcon_ops {
+struct fbcon_bitops {
void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width);
void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
@@ -65,6 +65,9 @@ struct fbcon_ops {
bool enable, int fg, int bg);
int (*update_start)(struct fb_info *info);
int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
+};
+
+struct fbcon_par {
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
struct delayed_work cursor_work; /* Cursor timer */
struct fb_cursor cursor_state;
@@ -86,7 +89,10 @@ struct fbcon_ops {
u8 *cursor_src;
u32 cursor_size;
u32 fd_size;
+
+ const struct fbcon_bitops *bitops;
};
+
/*
* Attribute Decoding
*/
@@ -106,7 +112,6 @@ struct fbcon_ops {
((s) & 0x400)
#define attr_blink(s) \
((s) & 0x8000)
-
static inline int mono_col(const struct fb_info *info)
{
@@ -186,7 +191,7 @@ static inline u_short fb_scrollmode(struct fbcon_display *fb)
#ifdef CONFIG_FB_TILEBLITTING
extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
-extern void fbcon_set_bitops(struct fbcon_ops *ops);
+extern void fbcon_set_bitops_ur(struct fbcon_par *par);
extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor);
#define FBCON_ATTRIBUTE_UNDERLINE 1
@@ -224,10 +229,4 @@ static inline int get_attribute(struct fb_info *info, u16 c)
(void) (&_r == &_v); \
(i == FB_ROTATE_UR || i == FB_ROTATE_UD) ? _r : _v; })
-#ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION
-extern void fbcon_set_rotate(struct fbcon_ops *ops);
-#else
-#define fbcon_set_rotate(x) do {} while(0)
-#endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */
-
#endif /* _VIDEO_FBCON_H */
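
Illustrative sketch only, not part of the patch: the fbcon.h hunks above split the old combined structure so that mutable per-framebuffer state (struct fbcon_par) merely points at a shared, const table of blitting callbacks (struct fbcon_bitops). The stand-alone model below shows the intended dispatch pattern after the change; all demo_* names are invented for this example.

/* Minimal model of the fbcon_par / fbcon_bitops split (demo_* names invented). */
#include <stdio.h>

struct demo_info;                               /* stands in for struct fb_info */

struct demo_bitops {                            /* like struct fbcon_bitops */
	void (*clear)(struct demo_info *info, int sy, int sx,
		      int height, int width, int fg, int bg);
	int (*update_start)(struct demo_info *info);
};

struct demo_par {                               /* like struct fbcon_par */
	int rotate;
	int currcon;
	const struct demo_bitops *bitops;       /* shared, read-only vtable */
};

struct demo_info {
	struct demo_par *par;                   /* like info->fbcon_par */
};

static void ur_clear(struct demo_info *info, int sy, int sx,
		     int height, int width, int fg, int bg)
{
	(void)info; (void)fg; (void)bg;
	printf("clear %dx%d cells at (%d,%d)\n", width, height, sx, sy);
}

static int ur_update_start(struct demo_info *info)
{
	(void)info;
	return 0;
}

/* One const table per blitting backend, as the fbcon_set_bitops_*() helpers install. */
static const struct demo_bitops ur_bitops = {
	.clear        = ur_clear,
	.update_start = ur_update_start,
};

int main(void)
{
	struct demo_par par = { .rotate = 0, .currcon = -1, .bitops = &ur_bitops };
	struct demo_info info = { .par = &par };

	/* Call sites dispatch through par->bitops->..., matching the fbcon.c
	 * hunks that replace ops->clear() with par->bitops->clear(). */
	info.par->bitops->clear(&info, 0, 0, 25, 80, 7, 0);
	return info.par->bitops->update_start(&info);
}
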
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index 89ef4ba7e867..2f394b5a17f7 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -63,9 +63,9 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute,
static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_copyarea area;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
area.sx = sy * vc->vc_font.height;
area.sy = vyres - ((sx + width) * vc->vc_font.width);
@@ -80,9 +80,9 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width, int fg, int bg)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_fillrect region;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
region.color = bg;
region.dx = sy * vc->vc_font.height;
@@ -99,13 +99,13 @@ static inline void ccw_putcs_aligned(struct vc_data *vc, struct fb_info *info,
u32 d_pitch, u32 s_pitch, u32 cellsize,
struct fb_image *image, u8 *buf, u8 *dst)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
u32 idx = (vc->vc_font.height + 7) >> 3;
u8 *src;
while (cnt--) {
- src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize;
+ src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize;
if (attr) {
ccw_update_attr(buf, src, attr, vc);
@@ -130,7 +130,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
int fg, int bg)
{
struct fb_image image;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u32 width = (vc->vc_font.height + 7)/8;
u32 cellsize = width * vc->vc_font.width;
u32 maxcnt = info->pixmap.size/cellsize;
@@ -139,9 +139,9 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
image.fg_color = fg;
@@ -221,28 +221,28 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.height + 7) >> 3, c;
- int y = real_y(ops->p, vc->state.y);
+ int y = real_y(par->p, vc->state.y);
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
cursor.set = 0;
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
- src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
+ src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
- if (ops->cursor_state.image.data != src ||
- ops->cursor_reset) {
- ops->cursor_state.image.data = src;
- cursor.set |= FB_CUR_SETIMAGE;
+ if (par->cursor_state.image.data != src ||
+ par->cursor_reset) {
+ par->cursor_state.image.data = src;
+ cursor.set |= FB_CUR_SETIMAGE;
}
if (attribute) {
@@ -251,49 +251,49 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC);
if (!dst)
return;
- kfree(ops->cursor_data);
- ops->cursor_data = dst;
+ kfree(par->cursor_data);
+ par->cursor_data = dst;
ccw_update_attr(dst, src, attribute, vc);
src = dst;
}
- if (ops->cursor_state.image.fg_color != fg ||
- ops->cursor_state.image.bg_color != bg ||
- ops->cursor_reset) {
- ops->cursor_state.image.fg_color = fg;
- ops->cursor_state.image.bg_color = bg;
+ if (par->cursor_state.image.fg_color != fg ||
+ par->cursor_state.image.bg_color != bg ||
+ par->cursor_reset) {
+ par->cursor_state.image.fg_color = fg;
+ par->cursor_state.image.bg_color = bg;
cursor.set |= FB_CUR_SETCMAP;
}
- if (ops->cursor_state.image.height != vc->vc_font.width ||
- ops->cursor_state.image.width != vc->vc_font.height ||
- ops->cursor_reset) {
- ops->cursor_state.image.height = vc->vc_font.width;
- ops->cursor_state.image.width = vc->vc_font.height;
+ if (par->cursor_state.image.height != vc->vc_font.width ||
+ par->cursor_state.image.width != vc->vc_font.height ||
+ par->cursor_reset) {
+ par->cursor_state.image.height = vc->vc_font.width;
+ par->cursor_state.image.width = vc->vc_font.height;
cursor.set |= FB_CUR_SETSIZE;
}
dx = y * vc->vc_font.height;
dy = vyres - ((vc->state.x + 1) * vc->vc_font.width);
- if (ops->cursor_state.image.dx != dx ||
- ops->cursor_state.image.dy != dy ||
- ops->cursor_reset) {
- ops->cursor_state.image.dx = dx;
- ops->cursor_state.image.dy = dy;
+ if (par->cursor_state.image.dx != dx ||
+ par->cursor_state.image.dy != dy ||
+ par->cursor_reset) {
+ par->cursor_state.image.dx = dx;
+ par->cursor_state.image.dy = dy;
cursor.set |= FB_CUR_SETPOS;
}
- if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
- ops->cursor_reset) {
- ops->cursor_state.hot.x = cursor.hot.y = 0;
+ if (par->cursor_state.hot.x || par->cursor_state.hot.y ||
+ par->cursor_reset) {
+ par->cursor_state.hot.x = cursor.hot.y = 0;
cursor.set |= FB_CUR_SETHOT;
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != ops->p->cursor_shape ||
- ops->cursor_state.mask == NULL ||
- ops->cursor_reset) {
+ vc->vc_cursor_type != par->p->cursor_shape ||
+ par->cursor_state.mask == NULL ||
+ par->cursor_reset) {
char *tmp, *mask = kmalloc_array(w, vc->vc_font.width,
GFP_ATOMIC);
int cur_height, size, i = 0;
@@ -309,13 +309,13 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
return;
}
- kfree(ops->cursor_state.mask);
- ops->cursor_state.mask = mask;
+ kfree(par->cursor_state.mask);
+ par->cursor_state.mask = mask;
- ops->p->cursor_shape = vc->vc_cursor_type;
+ par->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (CUR_SIZE(ops->p->cursor_shape)) {
+ switch (CUR_SIZE(par->p->cursor_shape)) {
case CUR_NONE:
cur_height = 0;
break;
@@ -348,19 +348,19 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
kfree(tmp);
}
- ops->cursor_state.enable = enable && !use_sw;
+ par->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
- cursor.image.fg_color = ops->cursor_state.image.fg_color;
- cursor.image.bg_color = ops->cursor_state.image.bg_color;
- cursor.image.dx = ops->cursor_state.image.dx;
- cursor.image.dy = ops->cursor_state.image.dy;
- cursor.image.height = ops->cursor_state.image.height;
- cursor.image.width = ops->cursor_state.image.width;
- cursor.hot.x = ops->cursor_state.hot.x;
- cursor.hot.y = ops->cursor_state.hot.y;
- cursor.mask = ops->cursor_state.mask;
- cursor.enable = ops->cursor_state.enable;
+ cursor.image.fg_color = par->cursor_state.image.fg_color;
+ cursor.image.bg_color = par->cursor_state.image.bg_color;
+ cursor.image.dx = par->cursor_state.image.dx;
+ cursor.image.dy = par->cursor_state.image.dy;
+ cursor.image.height = par->cursor_state.image.height;
+ cursor.image.width = par->cursor_state.image.width;
+ cursor.hot.x = par->cursor_state.hot.x;
+ cursor.hot.y = par->cursor_state.hot.y;
+ cursor.mask = par->cursor_state.mask;
+ cursor.enable = par->cursor_state.enable;
cursor.image.depth = 1;
cursor.rop = ROP_XOR;
@@ -370,32 +370,37 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (err)
soft_cursor(info, &cursor);
- ops->cursor_reset = 0;
+ par->cursor_reset = 0;
}
static int ccw_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u32 yoffset;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
int err;
- yoffset = (vyres - info->var.yres) - ops->var.xoffset;
- ops->var.xoffset = ops->var.yoffset;
- ops->var.yoffset = yoffset;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ yoffset = (vyres - info->var.yres) - par->var.xoffset;
+ par->var.xoffset = par->var.yoffset;
+ par->var.yoffset = yoffset;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
-void fbcon_rotate_ccw(struct fbcon_ops *ops)
+static const struct fbcon_bitops ccw_fbcon_bitops = {
+ .bmove = ccw_bmove,
+ .clear = ccw_clear,
+ .putcs = ccw_putcs,
+ .clear_margins = ccw_clear_margins,
+ .cursor = ccw_cursor,
+ .update_start = ccw_update_start,
+ .rotate_font = fbcon_rotate_font,
+};
+
+void fbcon_set_bitops_ccw(struct fbcon_par *par)
{
- ops->bmove = ccw_bmove;
- ops->clear = ccw_clear;
- ops->putcs = ccw_putcs;
- ops->clear_margins = ccw_clear_margins;
- ops->cursor = ccw_cursor;
- ops->update_start = ccw_update_start;
+ par->bitops = &ccw_fbcon_bitops;
}
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index b9dac7940fb7..3c3ad3471ec4 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -48,9 +48,9 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute,
static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_copyarea area;
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vxres = GETVXRES(par->p, info);
area.sx = vxres - ((sy + height) * vc->vc_font.height);
area.sy = sx * vc->vc_font.width;
@@ -65,9 +65,9 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width, int fg, int bg)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_fillrect region;
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vxres = GETVXRES(par->p, info);
region.color = bg;
region.dx = vxres - ((sy + height) * vc->vc_font.height);
@@ -84,13 +84,13 @@ static inline void cw_putcs_aligned(struct vc_data *vc, struct fb_info *info,
u32 d_pitch, u32 s_pitch, u32 cellsize,
struct fb_image *image, u8 *buf, u8 *dst)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
u32 idx = (vc->vc_font.height + 7) >> 3;
u8 *src;
while (cnt--) {
- src = ops->fontbuffer + (scr_readw(s++) & charmask)*cellsize;
+ src = par->fontbuffer + (scr_readw(s++) & charmask) * cellsize;
if (attr) {
cw_update_attr(buf, src, attr, vc);
@@ -115,7 +115,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info,
int fg, int bg)
{
struct fb_image image;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u32 width = (vc->vc_font.height + 7)/8;
u32 cellsize = width * vc->vc_font.width;
u32 maxcnt = info->pixmap.size/cellsize;
@@ -124,9 +124,9 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vxres = GETVXRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
image.fg_color = fg;
@@ -204,28 +204,28 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.height + 7) >> 3, c;
- int y = real_y(ops->p, vc->state.y);
+ int y = real_y(par->p, vc->state.y);
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vxres = GETVXRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
cursor.set = 0;
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
- src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
+ src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
- if (ops->cursor_state.image.data != src ||
- ops->cursor_reset) {
- ops->cursor_state.image.data = src;
- cursor.set |= FB_CUR_SETIMAGE;
+ if (par->cursor_state.image.data != src ||
+ par->cursor_reset) {
+ par->cursor_state.image.data = src;
+ cursor.set |= FB_CUR_SETIMAGE;
}
if (attribute) {
@@ -234,49 +234,49 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC);
if (!dst)
return;
- kfree(ops->cursor_data);
- ops->cursor_data = dst;
+ kfree(par->cursor_data);
+ par->cursor_data = dst;
cw_update_attr(dst, src, attribute, vc);
src = dst;
}
- if (ops->cursor_state.image.fg_color != fg ||
- ops->cursor_state.image.bg_color != bg ||
- ops->cursor_reset) {
- ops->cursor_state.image.fg_color = fg;
- ops->cursor_state.image.bg_color = bg;
+ if (par->cursor_state.image.fg_color != fg ||
+ par->cursor_state.image.bg_color != bg ||
+ par->cursor_reset) {
+ par->cursor_state.image.fg_color = fg;
+ par->cursor_state.image.bg_color = bg;
cursor.set |= FB_CUR_SETCMAP;
}
- if (ops->cursor_state.image.height != vc->vc_font.width ||
- ops->cursor_state.image.width != vc->vc_font.height ||
- ops->cursor_reset) {
- ops->cursor_state.image.height = vc->vc_font.width;
- ops->cursor_state.image.width = vc->vc_font.height;
+ if (par->cursor_state.image.height != vc->vc_font.width ||
+ par->cursor_state.image.width != vc->vc_font.height ||
+ par->cursor_reset) {
+ par->cursor_state.image.height = vc->vc_font.width;
+ par->cursor_state.image.width = vc->vc_font.height;
cursor.set |= FB_CUR_SETSIZE;
}
dx = vxres - ((y * vc->vc_font.height) + vc->vc_font.height);
dy = vc->state.x * vc->vc_font.width;
- if (ops->cursor_state.image.dx != dx ||
- ops->cursor_state.image.dy != dy ||
- ops->cursor_reset) {
- ops->cursor_state.image.dx = dx;
- ops->cursor_state.image.dy = dy;
+ if (par->cursor_state.image.dx != dx ||
+ par->cursor_state.image.dy != dy ||
+ par->cursor_reset) {
+ par->cursor_state.image.dx = dx;
+ par->cursor_state.image.dy = dy;
cursor.set |= FB_CUR_SETPOS;
}
- if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
- ops->cursor_reset) {
- ops->cursor_state.hot.x = cursor.hot.y = 0;
+ if (par->cursor_state.hot.x || par->cursor_state.hot.y ||
+ par->cursor_reset) {
+ par->cursor_state.hot.x = cursor.hot.y = 0;
cursor.set |= FB_CUR_SETHOT;
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != ops->p->cursor_shape ||
- ops->cursor_state.mask == NULL ||
- ops->cursor_reset) {
+ vc->vc_cursor_type != par->p->cursor_shape ||
+ par->cursor_state.mask == NULL ||
+ par->cursor_reset) {
char *tmp, *mask = kmalloc_array(w, vc->vc_font.width,
GFP_ATOMIC);
int cur_height, size, i = 0;
@@ -292,13 +292,13 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
return;
}
- kfree(ops->cursor_state.mask);
- ops->cursor_state.mask = mask;
+ kfree(par->cursor_state.mask);
+ par->cursor_state.mask = mask;
- ops->p->cursor_shape = vc->vc_cursor_type;
+ par->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (CUR_SIZE(ops->p->cursor_shape)) {
+ switch (CUR_SIZE(par->p->cursor_shape)) {
case CUR_NONE:
cur_height = 0;
break;
@@ -331,19 +331,19 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
kfree(tmp);
}
- ops->cursor_state.enable = enable && !use_sw;
+ par->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
- cursor.image.fg_color = ops->cursor_state.image.fg_color;
- cursor.image.bg_color = ops->cursor_state.image.bg_color;
- cursor.image.dx = ops->cursor_state.image.dx;
- cursor.image.dy = ops->cursor_state.image.dy;
- cursor.image.height = ops->cursor_state.image.height;
- cursor.image.width = ops->cursor_state.image.width;
- cursor.hot.x = ops->cursor_state.hot.x;
- cursor.hot.y = ops->cursor_state.hot.y;
- cursor.mask = ops->cursor_state.mask;
- cursor.enable = ops->cursor_state.enable;
+ cursor.image.fg_color = par->cursor_state.image.fg_color;
+ cursor.image.bg_color = par->cursor_state.image.bg_color;
+ cursor.image.dx = par->cursor_state.image.dx;
+ cursor.image.dy = par->cursor_state.image.dy;
+ cursor.image.height = par->cursor_state.image.height;
+ cursor.image.width = par->cursor_state.image.width;
+ cursor.hot.x = par->cursor_state.hot.x;
+ cursor.hot.y = par->cursor_state.hot.y;
+ cursor.mask = par->cursor_state.mask;
+ cursor.enable = par->cursor_state.enable;
cursor.image.depth = 1;
cursor.rop = ROP_XOR;
@@ -353,32 +353,37 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (err)
soft_cursor(info, &cursor);
- ops->cursor_reset = 0;
+ par->cursor_reset = 0;
}
static int cw_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
- u32 vxres = GETVXRES(ops->p, info);
+ struct fbcon_par *par = info->fbcon_par;
+ u32 vxres = GETVXRES(par->p, info);
u32 xoffset;
int err;
- xoffset = vxres - (info->var.xres + ops->var.yoffset);
- ops->var.yoffset = ops->var.xoffset;
- ops->var.xoffset = xoffset;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ xoffset = vxres - (info->var.xres + par->var.yoffset);
+ par->var.yoffset = par->var.xoffset;
+ par->var.xoffset = xoffset;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
-void fbcon_rotate_cw(struct fbcon_ops *ops)
+static const struct fbcon_bitops cw_fbcon_bitops = {
+ .bmove = cw_bmove,
+ .clear = cw_clear,
+ .putcs = cw_putcs,
+ .clear_margins = cw_clear_margins,
+ .cursor = cw_cursor,
+ .update_start = cw_update_start,
+ .rotate_font = fbcon_rotate_font,
+};
+
+void fbcon_set_bitops_cw(struct fbcon_par *par)
{
- ops->bmove = cw_bmove;
- ops->clear = cw_clear;
- ops->putcs = cw_putcs;
- ops->clear_margins = cw_clear_margins;
- ops->cursor = cw_cursor;
- ops->update_start = cw_update_start;
+ par->bitops = &cw_fbcon_bitops;
}
diff --git a/drivers/video/fbdev/core/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c
index ec3c883400f7..1562a8f20b4f 100644
--- a/drivers/video/fbdev/core/fbcon_rotate.c
+++ b/drivers/video/fbdev/core/fbcon_rotate.c
@@ -18,34 +18,34 @@
#include "fbcon.h"
#include "fbcon_rotate.h"
-static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
+int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int len, err = 0;
int s_cellsize, d_cellsize, i;
const u8 *src;
u8 *dst;
- if (vc->vc_font.data == ops->fontdata &&
- ops->p->con_rotate == ops->cur_rotate)
+ if (vc->vc_font.data == par->fontdata &&
+ par->p->con_rotate == par->cur_rotate)
goto finished;
- src = ops->fontdata = vc->vc_font.data;
- ops->cur_rotate = ops->p->con_rotate;
+ src = par->fontdata = vc->vc_font.data;
+ par->cur_rotate = par->p->con_rotate;
len = vc->vc_font.charcount;
s_cellsize = ((vc->vc_font.width + 7)/8) *
vc->vc_font.height;
d_cellsize = s_cellsize;
- if (ops->rotate == FB_ROTATE_CW ||
- ops->rotate == FB_ROTATE_CCW)
+ if (par->rotate == FB_ROTATE_CW ||
+ par->rotate == FB_ROTATE_CCW)
d_cellsize = ((vc->vc_font.height + 7)/8) *
vc->vc_font.width;
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
- if (ops->fd_size < d_cellsize * len) {
+ if (par->fd_size < d_cellsize * len) {
dst = kmalloc_array(len, d_cellsize, GFP_KERNEL);
if (dst == NULL) {
@@ -53,15 +53,15 @@ static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
goto finished;
}
- ops->fd_size = d_cellsize * len;
- kfree(ops->fontbuffer);
- ops->fontbuffer = dst;
+ par->fd_size = d_cellsize * len;
+ kfree(par->fontbuffer);
+ par->fontbuffer = dst;
}
- dst = ops->fontbuffer;
- memset(dst, 0, ops->fd_size);
+ dst = par->fontbuffer;
+ memset(dst, 0, par->fd_size);
- switch (ops->rotate) {
+ switch (par->rotate) {
case FB_ROTATE_UD:
for (i = len; i--; ) {
rotate_ud(src, dst, vc->vc_font.width,
@@ -92,20 +92,3 @@ static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
finished:
return err;
}
-
-void fbcon_set_rotate(struct fbcon_ops *ops)
-{
- ops->rotate_font = fbcon_rotate_font;
-
- switch(ops->rotate) {
- case FB_ROTATE_CW:
- fbcon_rotate_cw(ops);
- break;
- case FB_ROTATE_UD:
- fbcon_rotate_ud(ops);
- break;
- case FB_ROTATE_CCW:
- fbcon_rotate_ccw(ops);
- break;
- }
-}
diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h
index 01cbe303b8a2..8cb019e8a9c0 100644
--- a/drivers/video/fbdev/core/fbcon_rotate.h
+++ b/drivers/video/fbdev/core/fbcon_rotate.h
@@ -90,7 +90,19 @@ static inline void rotate_ccw(const char *in, char *out, u32 width, u32 height)
}
}
-extern void fbcon_rotate_cw(struct fbcon_ops *ops);
-extern void fbcon_rotate_ud(struct fbcon_ops *ops);
-extern void fbcon_rotate_ccw(struct fbcon_ops *ops);
+int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc);
+
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION)
+void fbcon_set_bitops_cw(struct fbcon_par *par);
+void fbcon_set_bitops_ud(struct fbcon_par *par);
+void fbcon_set_bitops_ccw(struct fbcon_par *par);
+#else
+static inline void fbcon_set_bitops_cw(struct fbcon_par *par)
+{ }
+static inline void fbcon_set_bitops_ud(struct fbcon_par *par)
+{ }
+static inline void fbcon_set_bitops_ccw(struct fbcon_par *par)
+{ }
+#endif
+
#endif
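The removed fbcon_set_rotate() above used to pick the blitting callbacks based on ops->rotate. With the new const bitops tables, the caller is expected to dispatch to the matching fbcon_set_bitops_*() helper itself. A minimal sketch of that dispatch, assuming the caller already holds the fbcon_par (the actual selection logic lives in fbcon.c and may differ):

	static void example_set_rotated_bitops(struct fbcon_par *par)
	{
		/* Mirrors the removed fbcon_set_rotate(): choose the bitops
		 * table that matches the requested rotation. */
		switch (par->rotate) {
		case FB_ROTATE_CW:
			fbcon_set_bitops_cw(par);
			break;
		case FB_ROTATE_UD:
			fbcon_set_bitops_ud(par);
			break;
		case FB_ROTATE_CCW:
			fbcon_set_bitops_ccw(par);
			break;
		}
	}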
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 0af7913a2abd..6fc30cad5b19 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -48,10 +48,10 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute,
static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_copyarea area;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
area.sy = vyres - ((sy + height) * vc->vc_font.height);
area.sx = vxres - ((sx + width) * vc->vc_font.width);
@@ -66,10 +66,10 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width, int fg, int bg)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_fillrect region;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
region.color = bg;
region.dy = vyres - ((sy + height) * vc->vc_font.height);
@@ -86,13 +86,13 @@ static inline void ud_putcs_aligned(struct vc_data *vc, struct fb_info *info,
u32 d_pitch, u32 s_pitch, u32 cellsize,
struct fb_image *image, u8 *buf, u8 *dst)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
u32 idx = vc->vc_font.width >> 3;
u8 *src;
while (cnt--) {
- src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize;
+ src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize;
if (attr) {
ud_update_attr(buf, src, attr, vc);
@@ -119,7 +119,7 @@ static inline void ud_putcs_unaligned(struct vc_data *vc,
struct fb_image *image, u8 *buf,
u8 *dst)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
u32 shift_low = 0, mod = vc->vc_font.width % 8;
u32 shift_high = 8;
@@ -127,7 +127,7 @@ static inline void ud_putcs_unaligned(struct vc_data *vc,
u8 *src;
while (cnt--) {
- src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize;
+ src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize;
if (attr) {
ud_update_attr(buf, src, attr, vc);
@@ -152,7 +152,7 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info,
int fg, int bg)
{
struct fb_image image;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u32 width = (vc->vc_font.width + 7)/8;
u32 cellsize = width * vc->vc_font.height;
u32 maxcnt = info->pixmap.size/cellsize;
@@ -161,10 +161,10 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info,
u32 mod = vc->vc_font.width % 8, cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
image.fg_color = fg;
@@ -251,29 +251,29 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.width + 7) >> 3, c;
- int y = real_y(ops->p, vc->state.y);
+ int y = real_y(par->p, vc->state.y);
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
cursor.set = 0;
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
- src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
+ src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
- if (ops->cursor_state.image.data != src ||
- ops->cursor_reset) {
- ops->cursor_state.image.data = src;
- cursor.set |= FB_CUR_SETIMAGE;
+ if (par->cursor_state.image.data != src ||
+ par->cursor_reset) {
+ par->cursor_state.image.data = src;
+ cursor.set |= FB_CUR_SETIMAGE;
}
if (attribute) {
@@ -282,49 +282,49 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
if (!dst)
return;
- kfree(ops->cursor_data);
- ops->cursor_data = dst;
+ kfree(par->cursor_data);
+ par->cursor_data = dst;
ud_update_attr(dst, src, attribute, vc);
src = dst;
}
- if (ops->cursor_state.image.fg_color != fg ||
- ops->cursor_state.image.bg_color != bg ||
- ops->cursor_reset) {
- ops->cursor_state.image.fg_color = fg;
- ops->cursor_state.image.bg_color = bg;
+ if (par->cursor_state.image.fg_color != fg ||
+ par->cursor_state.image.bg_color != bg ||
+ par->cursor_reset) {
+ par->cursor_state.image.fg_color = fg;
+ par->cursor_state.image.bg_color = bg;
cursor.set |= FB_CUR_SETCMAP;
}
- if (ops->cursor_state.image.height != vc->vc_font.height ||
- ops->cursor_state.image.width != vc->vc_font.width ||
- ops->cursor_reset) {
- ops->cursor_state.image.height = vc->vc_font.height;
- ops->cursor_state.image.width = vc->vc_font.width;
+ if (par->cursor_state.image.height != vc->vc_font.height ||
+ par->cursor_state.image.width != vc->vc_font.width ||
+ par->cursor_reset) {
+ par->cursor_state.image.height = vc->vc_font.height;
+ par->cursor_state.image.width = vc->vc_font.width;
cursor.set |= FB_CUR_SETSIZE;
}
dy = vyres - ((y * vc->vc_font.height) + vc->vc_font.height);
dx = vxres - ((vc->state.x * vc->vc_font.width) + vc->vc_font.width);
- if (ops->cursor_state.image.dx != dx ||
- ops->cursor_state.image.dy != dy ||
- ops->cursor_reset) {
- ops->cursor_state.image.dx = dx;
- ops->cursor_state.image.dy = dy;
+ if (par->cursor_state.image.dx != dx ||
+ par->cursor_state.image.dy != dy ||
+ par->cursor_reset) {
+ par->cursor_state.image.dx = dx;
+ par->cursor_state.image.dy = dy;
cursor.set |= FB_CUR_SETPOS;
}
- if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
- ops->cursor_reset) {
- ops->cursor_state.hot.x = cursor.hot.y = 0;
+ if (par->cursor_state.hot.x || par->cursor_state.hot.y ||
+ par->cursor_reset) {
+ par->cursor_state.hot.x = cursor.hot.y = 0;
cursor.set |= FB_CUR_SETHOT;
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != ops->p->cursor_shape ||
- ops->cursor_state.mask == NULL ||
- ops->cursor_reset) {
+ vc->vc_cursor_type != par->p->cursor_shape ||
+ par->cursor_state.mask == NULL ||
+ par->cursor_reset) {
char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
int cur_height, size, i = 0;
u8 msk = 0xff;
@@ -332,13 +332,13 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (!mask)
return;
- kfree(ops->cursor_state.mask);
- ops->cursor_state.mask = mask;
+ kfree(par->cursor_state.mask);
+ par->cursor_state.mask = mask;
- ops->p->cursor_shape = vc->vc_cursor_type;
+ par->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (CUR_SIZE(ops->p->cursor_shape)) {
+ switch (CUR_SIZE(par->p->cursor_shape)) {
case CUR_NONE:
cur_height = 0;
break;
@@ -371,19 +371,19 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
mask[i++] = ~msk;
}
- ops->cursor_state.enable = enable && !use_sw;
+ par->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
- cursor.image.fg_color = ops->cursor_state.image.fg_color;
- cursor.image.bg_color = ops->cursor_state.image.bg_color;
- cursor.image.dx = ops->cursor_state.image.dx;
- cursor.image.dy = ops->cursor_state.image.dy;
- cursor.image.height = ops->cursor_state.image.height;
- cursor.image.width = ops->cursor_state.image.width;
- cursor.hot.x = ops->cursor_state.hot.x;
- cursor.hot.y = ops->cursor_state.hot.y;
- cursor.mask = ops->cursor_state.mask;
- cursor.enable = ops->cursor_state.enable;
+ cursor.image.fg_color = par->cursor_state.image.fg_color;
+ cursor.image.bg_color = par->cursor_state.image.bg_color;
+ cursor.image.dx = par->cursor_state.image.dx;
+ cursor.image.dy = par->cursor_state.image.dy;
+ cursor.image.height = par->cursor_state.image.height;
+ cursor.image.width = par->cursor_state.image.width;
+ cursor.hot.x = par->cursor_state.hot.x;
+ cursor.hot.y = par->cursor_state.hot.y;
+ cursor.mask = par->cursor_state.mask;
+ cursor.enable = par->cursor_state.enable;
cursor.image.depth = 1;
cursor.rop = ROP_XOR;
@@ -393,36 +393,41 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (err)
soft_cursor(info, &cursor);
- ops->cursor_reset = 0;
+ par->cursor_reset = 0;
}
static int ud_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int xoffset, yoffset;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
int err;
- xoffset = vxres - info->var.xres - ops->var.xoffset;
- yoffset = vyres - info->var.yres - ops->var.yoffset;
+ xoffset = vxres - info->var.xres - par->var.xoffset;
+ yoffset = vyres - info->var.yres - par->var.yoffset;
if (yoffset < 0)
yoffset += vyres;
- ops->var.xoffset = xoffset;
- ops->var.yoffset = yoffset;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ par->var.xoffset = xoffset;
+ par->var.yoffset = yoffset;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
-void fbcon_rotate_ud(struct fbcon_ops *ops)
+static const struct fbcon_bitops ud_fbcon_bitops = {
+ .bmove = ud_bmove,
+ .clear = ud_clear,
+ .putcs = ud_putcs,
+ .clear_margins = ud_clear_margins,
+ .cursor = ud_cursor,
+ .update_start = ud_update_start,
+ .rotate_font = fbcon_rotate_font,
+};
+
+void fbcon_set_bitops_ud(struct fbcon_par *par)
{
- ops->bmove = ud_bmove;
- ops->clear = ud_clear;
- ops->putcs = ud_putcs;
- ops->clear_margins = ud_clear_margins;
- ops->cursor = ud_cursor;
- ops->update_start = ud_update_start;
+ par->bitops = &ud_fbcon_bitops;
}
diff --git a/drivers/video/fbdev/core/softcursor.c b/drivers/video/fbdev/core/softcursor.c
index 29e5b21cf373..900788c05915 100644
--- a/drivers/video/fbdev/core/softcursor.c
+++ b/drivers/video/fbdev/core/softcursor.c
@@ -21,7 +21,7 @@
int soft_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned int scan_align = info->pixmap.scan_align - 1;
unsigned int buf_align = info->pixmap.buf_align - 1;
unsigned int i, size, dsize, s_pitch, d_pitch;
@@ -34,19 +34,19 @@ int soft_cursor(struct fb_info *info, struct fb_cursor *cursor)
s_pitch = (cursor->image.width + 7) >> 3;
dsize = s_pitch * cursor->image.height;
- if (dsize + sizeof(struct fb_image) != ops->cursor_size) {
- kfree(ops->cursor_src);
- ops->cursor_size = dsize + sizeof(struct fb_image);
+ if (dsize + sizeof(struct fb_image) != par->cursor_size) {
+ kfree(par->cursor_src);
+ par->cursor_size = dsize + sizeof(struct fb_image);
- ops->cursor_src = kmalloc(ops->cursor_size, GFP_ATOMIC);
- if (!ops->cursor_src) {
- ops->cursor_size = 0;
+ par->cursor_src = kmalloc(par->cursor_size, GFP_ATOMIC);
+ if (!par->cursor_src) {
+ par->cursor_size = 0;
return -ENOMEM;
}
}
- src = ops->cursor_src + sizeof(struct fb_image);
- image = (struct fb_image *)ops->cursor_src;
+ src = par->cursor_src + sizeof(struct fb_image);
+ image = (struct fb_image *)par->cursor_src;
*image = cursor->image;
d_pitch = (s_pitch + scan_align) & ~scan_align;
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index d342b90c42b7..a9db668caf72 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -151,34 +151,38 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
static int tile_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int err;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
+static const struct fbcon_bitops tile_fbcon_bitops = {
+ .bmove = tile_bmove,
+ .clear = tile_clear,
+ .putcs = tile_putcs,
+ .clear_margins = tile_clear_margins,
+ .cursor = tile_cursor,
+ .update_start = tile_update_start,
+};
+
void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info)
{
struct fb_tilemap map;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->bmove = tile_bmove;
- ops->clear = tile_clear;
- ops->putcs = tile_putcs;
- ops->clear_margins = tile_clear_margins;
- ops->cursor = tile_cursor;
- ops->update_start = tile_update_start;
+ par->bitops = &tile_fbcon_bitops;
- if (ops->p) {
+ if (par->p) {
map.width = vc->vc_font.width;
map.height = vc->vc_font.height;
map.depth = 1;
map.length = vc->vc_font.charcount;
- map.data = ops->p->fontdata;
+ map.data = par->p->fontdata;
info->tileops->fb_settile(info, &map);
}
}
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 6acf5a00c2ba..92595af022eb 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -13,18 +13,18 @@
*/
#include <linux/aperture.h>
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/platform_data/simplefb.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/parser.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 6a46baa0737c..336f062e1f9d 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -143,9 +143,15 @@ struct dw_hdmi_plat_data {
const struct drm_display_info *info,
const struct drm_display_mode *mode);
+ /*
+ * priv_audio lets the additional audio device retrieve its driver
+ * data through this dw_hdmi_plat_data.
+ */
+ void *priv_audio;
+
/* Platform-specific audio enable/disable (optional) */
void (*enable_audio)(struct dw_hdmi *hdmi, int channel,
- int width, int rate, int non_pcm);
+ int width, int rate, int non_pcm, int iec958);
void (*disable_audio)(struct dw_hdmi *hdmi);
/* Vendor PHY support */
@@ -179,6 +185,7 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev);
void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm);
+void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958);
void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
@@ -208,4 +215,6 @@ void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi);
+const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/bridge/dw_hdmi_qp.h b/include/drm/bridge/dw_hdmi_qp.h
index e9be6d507ad9..76ecf3130199 100644
--- a/include/drm/bridge/dw_hdmi_qp.h
+++ b/include/drm/bridge/dw_hdmi_qp.h
@@ -23,6 +23,8 @@ struct dw_hdmi_qp_plat_data {
const struct dw_hdmi_qp_phy_ops *phy_ops;
void *phy_data;
int main_irq;
+ int cec_irq;
+ unsigned long ref_clk_rate;
};
struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 811e9238a77c..cf318e3ddb5c 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -115,6 +115,7 @@
#define DP_MAX_LANE_COUNT 0x002
# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_POST_LT_ADJ_REQ_SUPPORTED (1 << 5) /* 1.3 */
# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
# define DP_ENHANCED_FRAME_CAP (1 << 7)
@@ -583,6 +584,7 @@
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
+# define DP_POST_LT_ADJ_REQ_GRANTED (1 << 5) /* 1.3 */
# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
#define DP_TRAINING_PATTERN_SET 0x102
@@ -800,6 +802,7 @@
#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_POST_LT_ADJ_REQ_IN_PROGRESS (1 << 1) /* 1.3 */
#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */
#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */
#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 87caa4f1fdb8..52ce28097015 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -37,6 +37,7 @@ bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
+bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]);
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
@@ -156,6 +157,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_post_lt_adj_req_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x13 &&
+ (dpcd[DP_MAX_LANE_COUNT] & DP_POST_LT_ADJ_REQ_SUPPORTED);
+}
+
+static inline bool
drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x11 &&
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 38636a593c9d..155e82f87e4d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -159,12 +159,44 @@ struct drm_crtc_commit {
struct __drm_planes_state {
struct drm_plane *ptr;
- struct drm_plane_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the @drm_plane_state we will need to free when
+ * tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_plane_state *state_to_destroy;
+
+ struct drm_plane_state *old_state, *new_state;
};
struct __drm_crtcs_state {
struct drm_crtc *ptr;
- struct drm_crtc_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the @drm_crtc_state we will need to free when
+ * tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_crtc_state *state_to_destroy;
+
+ struct drm_crtc_state *old_state, *new_state;
/**
* @commit:
@@ -182,7 +214,24 @@ struct __drm_crtcs_state {
struct __drm_connnectors_state {
struct drm_connector *ptr;
- struct drm_connector_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the @drm_connector_state we will need to free
+ * when tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_connector_state *state_to_destroy;
+
+ struct drm_connector_state *old_state, *new_state;
+
/**
* @out_fence_ptr:
*
@@ -342,7 +391,23 @@ struct drm_private_state {
struct __drm_private_objs_state {
struct drm_private_obj *ptr;
- struct drm_private_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the @drm_private_state we will need to free
+ * when tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_private_state *state_to_destroy;
+
+ struct drm_private_state *old_state, *new_state;
};
/**
@@ -637,24 +702,6 @@ drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder);
/**
- * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists
- * @state: global atomic state object
- * @crtc: CRTC to grab
- *
- * This function returns the CRTC state for the given CRTC, or NULL
- * if the CRTC is not part of the global atomic state.
- *
- * This function is deprecated, @drm_atomic_get_old_crtc_state or
- * @drm_atomic_get_new_crtc_state should be used instead.
- */
-static inline struct drm_crtc_state *
-drm_atomic_get_existing_crtc_state(const struct drm_atomic_state *state,
- struct drm_crtc *crtc)
-{
- return state->crtcs[drm_crtc_index(crtc)].state;
-}
-
-/**
* drm_atomic_get_old_crtc_state - get old CRTC state, if it exists
* @state: global atomic state object
* @crtc: CRTC to grab
@@ -684,24 +731,6 @@ drm_atomic_get_new_crtc_state(const struct drm_atomic_state *state,
}
/**
- * drm_atomic_get_existing_plane_state - get plane state, if it exists
- * @state: global atomic state object
- * @plane: plane to grab
- *
- * This function returns the plane state for the given plane, or NULL
- * if the plane is not part of the global atomic state.
- *
- * This function is deprecated, @drm_atomic_get_old_plane_state or
- * @drm_atomic_get_new_plane_state should be used instead.
- */
-static inline struct drm_plane_state *
-drm_atomic_get_existing_plane_state(const struct drm_atomic_state *state,
- struct drm_plane *plane)
-{
- return state->planes[drm_plane_index(plane)].state;
-}
-
-/**
* drm_atomic_get_old_plane_state - get plane state, if it exists
* @state: global atomic state object
* @plane: plane to grab
@@ -732,29 +761,6 @@ drm_atomic_get_new_plane_state(const struct drm_atomic_state *state,
}
/**
- * drm_atomic_get_existing_connector_state - get connector state, if it exists
- * @state: global atomic state object
- * @connector: connector to grab
- *
- * This function returns the connector state for the given connector,
- * or NULL if the connector is not part of the global atomic state.
- *
- * This function is deprecated, @drm_atomic_get_old_connector_state or
- * @drm_atomic_get_new_connector_state should be used instead.
- */
-static inline struct drm_connector_state *
-drm_atomic_get_existing_connector_state(const struct drm_atomic_state *state,
- struct drm_connector *connector)
-{
- int index = drm_connector_index(connector);
-
- if (index >= state->num_connector)
- return NULL;
-
- return state->connectors[index].state;
-}
-
-/**
* drm_atomic_get_old_connector_state - get connector state, if it exists
* @state: global atomic state object
* @connector: connector to grab
@@ -799,11 +805,11 @@ drm_atomic_get_new_connector_state(const struct drm_atomic_state *state,
* @state: global atomic state object
* @plane: plane to grab
*
- * This function returns the plane state for the given plane, either from
- * @state, or if the plane isn't part of the atomic state update, from @plane.
- * This is useful in atomic check callbacks, when drivers need to peek at, but
- * not change, state of other planes, since it avoids threading an error code
- * back up the call chain.
+ * This function returns the plane state for the given plane, either the
+ * new plane state from @state, or if the plane isn't part of the atomic
+ * state update, from @plane. This is useful in atomic check callbacks,
+ * when drivers need to peek at, but not change, state of other planes,
+ * since it avoids threading an error code back up the call chain.
*
* WARNING:
*
@@ -824,9 +830,15 @@ static inline const struct drm_plane_state *
__drm_atomic_get_current_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
- if (state->planes[drm_plane_index(plane)].state)
- return state->planes[drm_plane_index(plane)].state;
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_new_plane_state(state, plane);
+ if (plane_state)
+ return plane_state;
+ /*
+ * If the plane isn't part of the state, fallback to the currently active one.
+ */
return plane->state;
}
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 76e05930f50e..0ff7ab4aa868 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -1362,6 +1362,13 @@ drm_bridge_get_current_state(struct drm_bridge *bridge)
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
*
+ * The caller is responsible for holding a reference to @bridge via
+ * drm_bridge_get() or equivalent. This function leaves the refcount of
+ * @bridge unmodified.
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the next bridge in the chain after @bridge, or NULL if @bridge is the last.
*/
@@ -1371,7 +1378,7 @@ drm_bridge_get_next_bridge(struct drm_bridge *bridge)
if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain))
return NULL;
- return list_next_entry(bridge, chain_node);
+ return drm_bridge_get(list_next_entry(bridge, chain_node));
}
/**
@@ -1434,15 +1441,61 @@ drm_bridge_chain_get_last_bridge(struct drm_encoder *encoder)
}
/**
- * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain
+ * drm_bridge_get_next_bridge_and_put - Get the next bridge in the chain
+ * and put the previous
+ * @bridge: bridge object
+ *
+ * Same as drm_bridge_get_next_bridge() but additionally puts the @bridge.
+ *
+ * RETURNS:
+ * the next bridge in the chain after @bridge, or NULL if @bridge is the last.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_next_bridge_and_put(struct drm_bridge *bridge)
+{
+ struct drm_bridge *next = drm_bridge_get_next_bridge(bridge);
+
+ drm_bridge_put(bridge);
+
+ return next;
+}
+
+/**
+ * drm_for_each_bridge_in_chain_scoped - iterate over all bridges attached
+ * to an encoder
* @encoder: the encoder to iterate bridges on
* @bridge: a bridge pointer updated to point to the current bridge at each
* iteration
*
* Iterate over all bridges present in the bridge chain attached to @encoder.
+ *
+ * Automatically gets/puts the bridge reference while iterating, and puts
+ * the reference even if returning or breaking in the middle of the loop.
+ */
+#define drm_for_each_bridge_in_chain_scoped(encoder, bridge) \
+ for (struct drm_bridge *bridge __free(drm_bridge_put) = \
+ drm_bridge_chain_get_first_bridge(encoder); \
+ bridge; \
+ bridge = drm_bridge_get_next_bridge_and_put(bridge))
+
+/**
+ * drm_for_each_bridge_in_chain_from - iterate over all bridges starting
+ * from the given bridge
+ * @first_bridge: the bridge to start from
+ * @bridge: a bridge pointer updated to point to the current bridge at each
+ * iteration
+ *
+ * Iterate over all bridges in the encoder chain starting from
+ * @first_bridge, included.
+ *
+ * Automatically gets/puts the bridge reference while iterating, and puts
+ * the reference even if returning or breaking in the middle of the loop.
*/
-#define drm_for_each_bridge_in_chain(encoder, bridge) \
- list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node)
+#define drm_for_each_bridge_in_chain_from(first_bridge, bridge) \
+ for (struct drm_bridge *bridge __free(drm_bridge_put) = \
+ drm_bridge_get(first_bridge); \
+ bridge; \
+ bridge = drm_bridge_get_next_bridge_and_put(bridge))
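A hedged usage sketch of the new scoped iterator, assuming an encoder with an attached bridge chain; because the cursor is declared with __free(drm_bridge_put), breaking out of the loop or returning early does not leak the bridge reference (the helper function is illustrative):

	static bool example_chain_has_hdmi(struct drm_encoder *encoder)
	{
		/* The macro takes and drops the per-bridge reference itself;
		 * an early return still puts the current bridge. */
		drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
			if (bridge->type == DRM_MODE_CONNECTOR_HDMIA)
				return true;
		}

		return false;
	}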
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 04afd7c21a82..c2e05a281252 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/rbtree.h>
#include <drm/drm_print.h>
@@ -44,7 +45,11 @@ struct drm_buddy_block {
* a list, if so desired. As soon as the block is freed with
* drm_buddy_free* ownership is given back to the mm.
*/
- struct list_head link;
+ union {
+ struct rb_node rb;
+ struct list_head link;
+ };
+
struct list_head tmp_link;
};
@@ -59,7 +64,7 @@ struct drm_buddy_block {
*/
struct drm_buddy {
/* Maintain a free list for each order. */
- struct list_head *free_list;
+ struct rb_root **free_trees;
/*
* Maintain explicit binary tree(s) to track the allocation of the
@@ -85,7 +90,7 @@ struct drm_buddy {
};
static inline u64
-drm_buddy_block_offset(struct drm_buddy_block *block)
+drm_buddy_block_offset(const struct drm_buddy_block *block)
{
return block->header & DRM_BUDDY_HEADER_OFFSET;
}
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 146ca80e35db..3556928d3938 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -70,13 +70,8 @@ struct drm_client_funcs {
* Called when suspending the device.
*
* This callback is optional.
- *
- * FIXME: Some callers hold the console lock when invoking this
- * function. This interferes with fbdev emulation, which
- * also tries to acquire the lock. Push the console lock
- * into the callback and remove 'holds_console_lock'.
*/
- int (*suspend)(struct drm_client_dev *client, bool holds_console_lock);
+ int (*suspend)(struct drm_client_dev *client);
/**
* @resume:
@@ -84,13 +79,8 @@ struct drm_client_funcs {
* Called when resuming the device from suspend.
*
* This callback is optional.
- *
- * FIXME: Some callers hold the console lock when invoking this
- * function. This interferes with fbdev emulation, which
- * also tries to acquire the lock. Push the console lock
- * into the callback and remove 'holds_console_lock'.
*/
- int (*resume)(struct drm_client_dev *client, bool holds_console_lock);
+ int (*resume)(struct drm_client_dev *client);
};
/**
@@ -220,6 +210,7 @@ int drm_client_modeset_check(struct drm_client_dev *client);
int drm_client_modeset_commit_locked(struct drm_client_dev *client);
int drm_client_modeset_commit(struct drm_client_dev *client);
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
+int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index);
/**
* drm_client_for_each_modeset() - Iterate over client modesets
diff --git a/include/drm/drm_client_event.h b/include/drm/drm_client_event.h
index 1d544d3a3228..985d6f02a4c4 100644
--- a/include/drm/drm_client_event.h
+++ b/include/drm/drm_client_event.h
@@ -11,8 +11,8 @@ struct drm_device;
void drm_client_dev_unregister(struct drm_device *dev);
void drm_client_dev_hotplug(struct drm_device *dev);
void drm_client_dev_restore(struct drm_device *dev);
-void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock);
-void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock);
+void drm_client_dev_suspend(struct drm_device *dev);
+void drm_client_dev_resume(struct drm_device *dev);
#else
static inline void drm_client_dev_unregister(struct drm_device *dev)
{ }
@@ -20,9 +20,9 @@ static inline void drm_client_dev_hotplug(struct drm_device *dev)
{ }
static inline void drm_client_dev_restore(struct drm_device *dev)
{ }
-static inline void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock)
+static inline void drm_client_dev_suspend(struct drm_device *dev)
{ }
-static inline void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock)
+static inline void drm_client_dev_resume(struct drm_device *dev)
{ }
#endif
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index caa56e039da2..2d2a0bd526cf 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -186,7 +186,7 @@ struct drm_crtc_state {
* this case the driver will send the VBLANK event on its own when the
* writeback job is complete.
*/
- bool no_vblank : 1;
+ bool no_vblank;
/**
* @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
diff --git a/include/drm/drm_dumb_buffers.h b/include/drm/drm_dumb_buffers.h
new file mode 100644
index 000000000000..1f3a8236fb3d
--- /dev/null
+++ b/include/drm/drm_dumb_buffers.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __DRM_DUMB_BUFFERS_H__
+#define __DRM_DUMB_BUFFERS_H__
+
+struct drm_device;
+struct drm_mode_create_dumb;
+
+int drm_mode_size_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ unsigned long hw_pitch_align,
+ unsigned long hw_size_align);
+
+#endif
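A hedged sketch of how a driver's .dumb_create callback might use drm_mode_size_dumb(), assuming the helper fills args->pitch and args->size from width/height/bpp; the alignment values and the allocation helper are illustrative, not part of this patch:

	static int example_dumb_create(struct drm_file *file, struct drm_device *dev,
				       struct drm_mode_create_dumb *args)
	{
		int ret;

		/* Hypothetical 64-byte pitch alignment and page-sized buffer
		 * alignment; real drivers pass their hardware requirements. */
		ret = drm_mode_size_dumb(dev, args, 64, PAGE_SIZE);
		if (ret)
			return ret;

		/* Driver-specific allocation of args->size bytes goes here and
		 * stores the resulting GEM handle in args->handle. */
		return example_alloc_gem_handle(file, dev, args);
	}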
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 32d57d6c5327..2b5c1aef80b0 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -128,10 +128,6 @@ void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
-int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
- const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state);
-
void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 92f5db84b9c2..589f7bfe7506 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -107,10 +107,12 @@ struct drm_gem_shmem_object {
#define to_drm_gem_shmem_obj(obj) \
container_of(obj, struct drm_gem_shmem_object, base)
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
size_t size,
struct vfsmount *gemfs);
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
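A hedged sketch of the new drm_gem_shmem_init()/drm_gem_shmem_release() pair, assuming they are intended for drivers that embed struct drm_gem_shmem_object in a larger driver object; the wrapper type and callbacks are illustrative:

	struct example_bo {
		struct drm_gem_shmem_object shmem;
		/* driver-private fields ... */
	};

	static struct example_bo *example_bo_create(struct drm_device *dev, size_t size)
	{
		struct example_bo *bo;
		int ret;

		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
		if (!bo)
			return ERR_PTR(-ENOMEM);

		/* Initialize the embedded shmem object instead of allocating one. */
		ret = drm_gem_shmem_init(dev, &bo->shmem, size);
		if (ret) {
			kfree(bo);
			return ERR_PTR(ret);
		}

		return bo;
	}

	static void example_bo_free(struct drm_gem_object *obj)
	{
		struct example_bo *bo = container_of(obj, struct example_bo, shmem.base);

		/* Counterpart of drm_gem_shmem_init(): release the shmem state
		 * before freeing the embedding object. */
		drm_gem_shmem_release(&bo->shmem);
		kfree(bo);
	}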
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index b92faa9a26b2..632e100e6efb 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -235,6 +235,9 @@ struct drm_gpusvm {
* @read_only: operating on read-only memory
* @devmem_possible: possible to use device memory
* @devmem_only: use only device memory
+ * @allow_mixed: Allow mixed mappings in get pages. Mixing system memory with
+ * a single dpagemap is supported; mixing between multiple dpagemaps is
+ * not supported.
*
* Context that is DRM GPUSVM is operating in (i.e. user arguments).
*/
@@ -246,6 +249,7 @@ struct drm_gpusvm_ctx {
unsigned int read_only :1;
unsigned int devmem_possible :1;
unsigned int devmem_only :1;
+ unsigned int allow_mixed :1;
};
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index ce7c7aeac887..fe32854b7ffe 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -490,6 +490,18 @@ struct drm_crtc_helper_funcs {
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+
+ /**
+ * @handle_vblank_timeout: Handles timeouts of the vblank timer.
+ *
+ * Called by the CRTC's vblank timer on each timeout. The semantics are
+ * equivalent to drm_crtc_handle_vblank(). Implementations should
+ * invoke drm_crtc_handle_vblank() as part of processing the timeout.
+ *
+ * This callback is optional. If unset, the vblank timer invokes
+ * drm_crtc_handle_vblank() directly.
+ */
+ bool (*handle_vblank_timeout)(struct drm_crtc *crtc);
};
/**
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 151ab1e85b1b..ffa564d79638 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -25,6 +25,7 @@
#define _DRM_VBLANK_H_
#include <linux/seqlock.h>
+#include <linux/hrtimer.h>
#include <linux/idr.h>
#include <linux/poll.h>
#include <linux/kthread.h>
@@ -104,6 +105,28 @@ struct drm_vblank_crtc_config {
};
/**
+ * struct drm_vblank_crtc_timer - vblank timer for a CRTC
+ */
+struct drm_vblank_crtc_timer {
+ /**
+ * @timer: The vblank's high-resolution timer
+ */
+ struct hrtimer timer;
+ /**
+ * @interval_lock: Protects @interval
+ */
+ spinlock_t interval_lock;
+ /**
+ * @interval: Duration between two vblanks
+ */
+ ktime_t interval;
+ /**
+ * @crtc: The timer's CRTC
+ */
+ struct drm_crtc *crtc;
+};
+
+/**
* struct drm_vblank_crtc - vblank tracking for a CRTC
*
* This structure tracks the vblank state for one CRTC.
@@ -254,6 +277,11 @@ struct drm_vblank_crtc {
* cancelled.
*/
wait_queue_head_t work_wait_queue;
+
+ /**
+ * @vblank_timer: Holds the state of the vblank timer
+ */
+ struct drm_vblank_crtc_timer vblank_timer;
};
struct drm_vblank_crtc *drm_crtc_vblank_crtc(struct drm_crtc *crtc);
@@ -290,6 +318,10 @@ wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count);
+int drm_crtc_vblank_start_timer(struct drm_crtc *crtc);
+void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc);
+void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time);
+
/*
* Helpers for struct drm_crtc_funcs
*/
diff --git a/include/drm/drm_vblank_helper.h b/include/drm/drm_vblank_helper.h
new file mode 100644
index 000000000000..fcd8a9b35846
--- /dev/null
+++ b/include/drm/drm_vblank_helper.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _DRM_VBLANK_HELPER_H_
+#define _DRM_VBLANK_HELPER_H_
+
+#include <linux/hrtimer_types.h>
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_crtc;
+
+/*
+ * VBLANK helpers
+ */
+
+void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *crtc_state);
+
+/**
+ * DRM_CRTC_HELPER_VBLANK_FUNCS - Default implementation for VBLANK helpers
+ *
+ * This macro initializes struct &drm_crtc_helper_funcs to default helpers
+ * for VBLANK handling.
+ */
+#define DRM_CRTC_HELPER_VBLANK_FUNCS \
+ .atomic_flush = drm_crtc_vblank_atomic_flush, \
+ .atomic_enable = drm_crtc_vblank_atomic_enable, \
+ .atomic_disable = drm_crtc_vblank_atomic_disable
+
+/*
+ * VBLANK timer
+ */
+
+int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc);
+void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc);
+bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
+
+/**
+ * DRM_CRTC_VBLANK_TIMER_FUNCS - Default implementation for VBLANK timers
+ *
+ * This macro initializes struct &drm_crtc_funcs to default helpers for
+ * VBLANK timers.
+ */
+#define DRM_CRTC_VBLANK_TIMER_FUNCS \
+ .enable_vblank = drm_crtc_vblank_helper_enable_vblank_timer, \
+ .disable_vblank = drm_crtc_vblank_helper_disable_vblank_timer, \
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp_from_timer
+
+#endif
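A hedged sketch of pulling both macro sets into a driver that has no hardware vblank interrupt and relies on the timer-based emulation; the surrounding atomic helpers are the usual defaults and the struct names are illustrative:

	static const struct drm_crtc_funcs example_crtc_funcs = {
		.reset = drm_atomic_helper_crtc_reset,
		.destroy = drm_crtc_cleanup,
		.set_config = drm_atomic_helper_set_config,
		.page_flip = drm_atomic_helper_page_flip,
		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
		/* Emulated vblanks via the hrtimer-based helpers. */
		DRM_CRTC_VBLANK_TIMER_FUNCS,
	};

	static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
		/* Arms/disarms the timer around enable/disable and completes
		 * pending events from atomic_flush. */
		DRM_CRTC_HELPER_VBLANK_FUNCS,
	};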
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 323a505e6e6a..fb88301b3c45 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -546,7 +546,7 @@ struct drm_sched_backend_ops {
* @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
* as there's usually one run-queue per priority, but could be less.
* @sched_rq: An allocated array of run-queues of size @num_rqs;
- * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
+ * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
* waits on this wait queue until all the scheduled jobs are
* finished.
* @job_id_count: used to assign unique id to the each job.
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index da6301a6fcea..9f095a99d6c9 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -849,7 +849,7 @@
MACRO__(0x64B0, ## __VA_ARGS__)
/* BMG */
-#define INTEL_BMG_IDS(MACRO__, ...) \
+#define INTEL_BMG_G21_IDS(MACRO__, ...) \
MACRO__(0xE202, ## __VA_ARGS__), \
MACRO__(0xE209, ## __VA_ARGS__), \
MACRO__(0xE20B, ## __VA_ARGS__), \
@@ -858,7 +858,10 @@
MACRO__(0xE210, ## __VA_ARGS__), \
MACRO__(0xE211, ## __VA_ARGS__), \
MACRO__(0xE212, ## __VA_ARGS__), \
- MACRO__(0xE216, ## __VA_ARGS__), \
+ MACRO__(0xE216, ## __VA_ARGS__)
+
+#define INTEL_BMG_IDS(MACRO__, ...) \
+ INTEL_BMG_G21_IDS(MACRO__, __VA_ARGS__), \
MACRO__(0xE220, ## __VA_ARGS__), \
MACRO__(0xE221, ## __VA_ARGS__), \
MACRO__(0xE222, ## __VA_ARGS__), \
@@ -881,4 +884,13 @@
MACRO__(0xFD80, ## __VA_ARGS__), \
MACRO__(0xFD81, ## __VA_ARGS__)
+/* NVL-S */
+#define INTEL_NVLS_IDS(MACRO__, ...) \
+ MACRO__(0xD740, ## __VA_ARGS__), \
+ MACRO__(0xD741, ## __VA_ARGS__), \
+ MACRO__(0xD742, ## __VA_ARGS__), \
+ MACRO__(0xD743, ## __VA_ARGS__), \
+ MACRO__(0xD744, ## __VA_ARGS__), \
+ MACRO__(0xD745, ## __VA_ARGS__)
+
#endif /* __PCIIDS_H__ */
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index e664a96540eb..bca3a8849d47 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -391,7 +391,7 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx);
-void ttm_bo_put(struct ttm_buffer_object *bo);
+void ttm_bo_fini(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
diff --git a/include/linux/dma-buf/heaps/cma.h b/include/linux/dma-buf/heaps/cma.h
new file mode 100644
index 000000000000..e751479e21e7
--- /dev/null
+++ b/include/linux/dma-buf/heaps/cma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DMA_BUF_HEAP_CMA_H_
+#define DMA_BUF_HEAP_CMA_H_
+
+struct cma;
+
+#ifdef CONFIG_DMABUF_HEAPS_CMA
+int dma_heap_cma_register_heap(struct cma *cma);
+#else
+static inline int dma_heap_cma_register_heap(struct cma *cma)
+{
+ return 0;
+}
+#endif // CONFIG_DMABUF_HEAPS_CMA
+
+#endif // DMA_BUF_HEAP_CMA_H_
diff --git a/include/sound/asoundef.h b/include/sound/asoundef.h
index 09b2c3dffb30..c4a929d4fd51 100644
--- a/include/sound/asoundef.h
+++ b/include/sound/asoundef.h
@@ -12,6 +12,15 @@
* Digital audio interface *
* *
****************************************************************************/
+/* IEC958 subframe format */
+#define IEC958_SUBFRAME_PREAMBLE_MASK (0xfU)
+#define IEC958_SUBFRAME_AUXILIARY_MASK (0xfU << 4)
+#define IEC958_SUBFRAME_SAMPLE_24_MASK (0xffffffU << 4)
+#define IEC958_SUBFRAME_SAMPLE_20_MASK (0xfffffU << 8)
+#define IEC958_SUBFRAME_VALIDITY (0x1U << 28)
+#define IEC958_SUBFRAME_USER_DATA (0x1U << 29)
+#define IEC958_SUBFRAME_CHANNEL_STATUS (0x1U << 30)
+#define IEC958_SUBFRAME_PARITY (0x1U << 31)
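A hedged sketch of unpacking one IEC958 subframe with the new masks, assuming the common layout in which the 24-bit sample occupies bits 4..27; the helpers are illustrative:

	static inline u32 example_iec958_sample_24(u32 subframe)
	{
		/* Bits 4..27 carry the 24-bit sample; shift it down after
		 * masking so the caller sees a plain 24-bit value. */
		return (subframe & IEC958_SUBFRAME_SAMPLE_24_MASK) >> 4;
	}

	static inline bool example_iec958_sample_valid(u32 subframe)
	{
		/* The validity flag is active-low: 0 means the sample is valid. */
		return !(subframe & IEC958_SUBFRAME_VALIDITY);
	}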
/* AES/IEC958 channel status bits */
#define IEC958_AES0_PROFESSIONAL (1<<0) /* 0 = consumer, 1 = professional */
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
index a1fb9785db77..c7eec9ceb2ae 100644
--- a/include/uapi/drm/amdxdna_accel.h
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -523,7 +523,20 @@ struct amdxdna_drm_hwctx_entry {
__u32 pad;
};
+/**
+ * struct amdxdna_async_error - XDNA async error structure
+ */
+struct amdxdna_async_error {
+ /** @err_code: Error code. */
+ __u64 err_code;
+ /** @ts_us: Timestamp. */
+ __u64 ts_us;
+ /** @ex_err_code: Extra error code */
+ __u64 ex_err_code;
+};
+
#define DRM_AMDXDNA_HW_CONTEXT_ALL 0
+#define DRM_AMDXDNA_HW_LAST_ASYNC_ERR 2
/**
* struct amdxdna_drm_get_array - Get information array.
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index a122bea25593..1e0e02a79b5c 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -1066,7 +1066,7 @@ struct drm_mode_crtc_page_flip_target {
* struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout.
* @height: buffer height in pixels
* @width: buffer width in pixels
- * @bpp: bits per pixel
+ * @bpp: color mode
* @flags: must be zero
* @handle: buffer object handle
* @pitch: number of bytes between two consecutive lines
@@ -1074,6 +1074,54 @@ struct drm_mode_crtc_page_flip_target {
*
* User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds,
* the kernel fills @handle, @pitch and @size.
+ *
+ * The value of @bpp is a color-mode number describing a specific format
+ * or a variant thereof. The value often corresponds to the number of bits
+ * per pixel for most modes, although there are exceptions. Each color mode
+ * maps to a DRM format plus a number of modes with similar pixel layout.
+ * Framebuffer layout is always linear.
+ *
+ * Support for all modes and formats is optional. Even if dumb-buffer
+ * creation with a certain color mode succeeds, it is not guaranteed that
+ * the DRM driver supports any of the related formats. Most drivers support
+ * a color mode of 32 with a format of DRM_FORMAT_XRGB8888 on their primary
+ * plane.
+ *
+ * +------------+------------------------+------------------------+
+ * | Color mode | Framebuffer format | Compatible formats |
+ * +============+========================+========================+
+ * | 32 | * DRM_FORMAT_XRGB8888 | * DRM_FORMAT_BGRX8888 |
+ * | | | * DRM_FORMAT_RGBX8888 |
+ * | | | * DRM_FORMAT_XBGR8888 |
+ * +------------+------------------------+------------------------+
+ * | 24 | * DRM_FORMAT_RGB888 | * DRM_FORMAT_BGR888 |
+ * +------------+------------------------+------------------------+
+ * | 16 | * DRM_FORMAT_RGB565 | * DRM_FORMAT_BGR565 |
+ * +------------+------------------------+------------------------+
+ * | 15 | * DRM_FORMAT_XRGB1555 | * DRM_FORMAT_BGRX1555 |
+ * | | | * DRM_FORMAT_RGBX1555 |
+ * | | | * DRM_FORMAT_XBGR1555 |
+ * +------------+------------------------+------------------------+
+ * | 8 | * DRM_FORMAT_C8 | * DRM_FORMAT_D8 |
+ * | | | * DRM_FORMAT_R8 |
+ * +------------+------------------------+------------------------+
+ * | 4 | * DRM_FORMAT_C4 | * DRM_FORMAT_D4 |
+ * | | | * DRM_FORMAT_R4 |
+ * +------------+------------------------+------------------------+
+ * | 2 | * DRM_FORMAT_C2 | * DRM_FORMAT_D2 |
+ * | | | * DRM_FORMAT_R2 |
+ * +------------+------------------------+------------------------+
+ * | 1 | * DRM_FORMAT_C1 | * DRM_FORMAT_D1 |
+ * | | | * DRM_FORMAT_R1 |
+ * +------------+------------------------+------------------------+
+ *
+ * Color modes of 10, 12, 15, 30 and 64 are only supported for use by
+ * legacy user space. Please don't use them in new code. Other modes
+ * are not supported.
+ *
+ * Do not attempt to allocate anything but linear framebuffer memory
+ * with single-plane RGB data. Allocation of other framebuffer
+ * layouts requires dedicated ioctls in the respective DRM driver.
*/
struct drm_mode_create_dumb {
__u32 height;
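A hedged user-space sketch of creating a dumb buffer with color mode 32 as described in the table above, assuming an open DRM device fd and the libdrm drmIoctl() wrapper (needs <xf86drm.h>, <drm/drm_mode.h> and <errno.h>); error handling is trimmed:

	static int example_create_dumb_xrgb8888(int fd, uint32_t *handle,
						uint32_t *pitch, uint64_t *size)
	{
		struct drm_mode_create_dumb creq = {
			.width  = 1024,
			.height = 768,
			.bpp    = 32,	/* color mode 32 -> DRM_FORMAT_XRGB8888 */
		};

		/* On success the kernel fills creq.handle, creq.pitch and creq.size. */
		if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
			return -errno;

		*handle = creq.handle;
		*pitch = creq.pitch;
		*size = creq.size;
		return 0;
	}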
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 160ee1411d4a..e470b0221e02 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -90,6 +90,7 @@ extern "C" {
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12
#define DRM_IVPU_PARAM_CAPABILITIES 13
+#define DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE 14
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
@@ -176,6 +177,9 @@ struct drm_ivpu_param {
*
* %DRM_IVPU_PARAM_CAPABILITIES:
* Supported capabilities (read-only)
+ *
+ * %DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
+ * Size of the preemption buffer (read-only)
*/
__u32 param;
@@ -371,6 +375,13 @@ struct drm_ivpu_cmdq_submit {
* to be executed. The offset has to be 8-byte aligned.
*/
__u32 commands_offset;
+ /**
+ * @preempt_buffer_index:
+ *
+ * Index of the preemption buffer in the buffers_ptr array.
+ */
+ __u32 preempt_buffer_index;
+ __u32 reserved;
};
/* drm_ivpu_bo_wait job status codes */
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index ed67510395bd..e8b47c9f6976 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -22,6 +22,8 @@ extern "C" {
#define DRM_PANFROST_PERFCNT_DUMP 0x07
#define DRM_PANFROST_MADVISE 0x08
#define DRM_PANFROST_SET_LABEL_BO 0x09
+#define DRM_PANFROST_JM_CTX_CREATE 0x0a
+#define DRM_PANFROST_JM_CTX_DESTROY 0x0b
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -31,6 +33,8 @@ extern "C" {
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
#define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo)
+#define DRM_IOCTL_PANFROST_JM_CTX_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_CREATE, struct drm_panfrost_jm_ctx_create)
+#define DRM_IOCTL_PANFROST_JM_CTX_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_DESTROY, struct drm_panfrost_jm_ctx_destroy)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -71,6 +75,12 @@ struct drm_panfrost_submit {
/** A combination of PANFROST_JD_REQ_* */
__u32 requirements;
+
+ /** JM context handle. Zero if you want to use the default context. */
+ __u32 jm_ctx_handle;
+
+ /** Padding field. MBZ. */
+ __u32 pad;
};
/**
@@ -177,6 +187,7 @@ enum drm_panfrost_param {
DRM_PANFROST_PARAM_AFBC_FEATURES,
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP,
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY,
+ DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES,
};
struct drm_panfrost_get_param {
@@ -299,6 +310,45 @@ struct panfrost_dump_registers {
__u32 value;
};
+enum drm_panfrost_jm_ctx_priority {
+ /**
+ * @PANFROST_JM_CTX_PRIORITY_LOW: Low priority context.
+ */
+ PANFROST_JM_CTX_PRIORITY_LOW = 0,
+
+ /**
+ * @PANFROST_JM_CTX_PRIORITY_MEDIUM: Medium priority context.
+ */
+ PANFROST_JM_CTX_PRIORITY_MEDIUM,
+
+ /**
+ * @PANFROST_JM_CTX_PRIORITY_HIGH: High priority context.
+ *
+ * Requires CAP_SYS_NICE or DRM_MASTER.
+ */
+ PANFROST_JM_CTX_PRIORITY_HIGH,
+};
+
+struct drm_panfrost_jm_ctx_create {
+ /** @handle: Handle of the created JM context */
+ __u32 handle;
+
+ /** @priority: Context priority (see enum drm_panfrost_jm_ctx_priority). */
+ __u32 priority;
+};
+
+struct drm_panfrost_jm_ctx_destroy {
+ /**
+ * @handle: Handle of the JM context to destroy.
+ *
+ * Must be a valid context handle returned by DRM_IOCTL_PANFROST_JM_CTX_CREATE.
+ */
+ __u32 handle;
+
+ /** @pad: Padding field, MBZ. */
+ __u32 pad;
+};
+
#if defined(__cplusplus)
}
#endif
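A hedged user-space sketch of the new Panfrost JM context ioctls, assuming the libdrm drmIoctl() wrapper and <drm/panfrost_drm.h>; the returned handle would then be passed in drm_panfrost_submit.jm_ctx_handle:

	static int example_create_high_prio_jm_ctx(int fd, uint32_t *handle)
	{
		struct drm_panfrost_jm_ctx_create req = {
			/* High priority requires CAP_SYS_NICE or DRM_MASTER. */
			.priority = PANFROST_JM_CTX_PRIORITY_HIGH,
		};

		if (drmIoctl(fd, DRM_IOCTL_PANFROST_JM_CTX_CREATE, &req))
			return -errno;

		*handle = req.handle;
		return 0;
	}

	static int example_destroy_jm_ctx(int fd, uint32_t handle)
	{
		struct drm_panfrost_jm_ctx_destroy req = {
			.handle = handle,
			/* pad is MBZ and stays zero-initialized. */
		};

		return drmIoctl(fd, DRM_IOCTL_PANFROST_JM_CTX_DESTROY, &req) ? -errno : 0;
	}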
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 517489a7ec60..47853659a705 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -771,7 +771,11 @@ struct drm_xe_device_query {
* until the object is either bound to a virtual memory region via
* VM_BIND or accessed by the CPU. As a result, no backing memory is
* reserved at the time of GEM object creation.
- * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
+ * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT - Indicates that the GEM object is
+ * intended for scanout via the display engine. When set, the kernel ensures
+ * that the allocation is placed in a memory region compatible with the
+ * display engine requirements. This may impose restrictions on tiling,
+ * alignment, and memory placement to guarantee proper display functionality.
* - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
* possible placement, ensure that the corresponding VRAM allocation
* will always use the CPU accessible part of VRAM. This is important
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index d9b9dcba6ff7..d8fd6f779f79 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -42,6 +42,7 @@
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
+#include <linux/dma-buf/heaps/cma.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>
#include <linux/nospec.h>
@@ -241,6 +242,8 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
}
if (selected_size && !dma_contiguous_default_area) {
+ int ret;
+
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
@@ -248,6 +251,10 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
selected_limit,
&dma_contiguous_default_area,
fixed);
+
+ ret = dma_heap_cma_register_heap(dma_contiguous_default_area);
+ if (ret)
+ pr_warn("Couldn't register default CMA heap.");
}
}
@@ -493,6 +500,10 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
+ err = dma_heap_cma_register_heap(cma);
+ if (err)
+ pr_warn("Couldn't register CMA heap.");
+
return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);