-rw-r--r--  .clang-format | 2
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.c | 6
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.c | 47
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.c | 38
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 3
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c | 66
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.h | 7
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.h | 7
-rw-r--r--  drivers/accel/ivpu/ivpu_job.c | 106
-rw-r--r--  drivers/accel/ivpu/ivpu_job.h | 4
-rw-r--r--  drivers/accel/ivpu/vpu_jsm_api.h | 513
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c | 7
-rw-r--r--  drivers/gpu/drm/display/drm_bridge_connector.c | 11
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 8
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 3
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 6
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 44
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 30
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 16
-rw-r--r--  drivers/gpu/drm/drm_gem_dma_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 98
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 6
-rw-r--r--  drivers/gpu/drm/gud/gud_connector.c | 8
-rw-r--r--  drivers/gpu/drm/gud/gud_pipe.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-tve.c | 17
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/parallel-display.c | 4
-rw-r--r--  drivers/gpu/drm/loongson/lsdc_gem.c | 2
-rw-r--r--  drivers/gpu/drm/mcde/mcde_clk_div.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c | 320
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c | 185
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h | 18
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 8
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h | 11
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 152
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 195
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.h | 25
-rw-r--r--  drivers/gpu/drm/panthor/panthor_drv.c | 2
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.c | 40
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.h | 3
-rw-r--r--  drivers/gpu/drm/pl111/pl111_display.c | 13
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r--  drivers/gpu/drm/scheduler/tests/sched_tests.h | 3
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.c | 6
-rw-r--r--  drivers/gpu/drm/stm/dw_mipi_dsi-stm.c | 14
-rw-r--r--  drivers/gpu/drm/stm/lvds.c | 12
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c | 12
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c | 18
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 4
-rw-r--r--  drivers/gpu/drm/tests/.kunitconfig | 2
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_test.c | 12
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c | 60
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 15
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_internal.h | 2
-rw-r--r--  drivers/gpu/drm/vc4/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 137
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 2
-rw-r--r--  drivers/gpu/host1x/bus.c | 12
-rw-r--r--  drivers/gpu/host1x/dev.c | 11
-rw-r--r--  drivers/gpu/host1x/hw/channel_hw.c | 106
-rw-r--r--  drivers/gpu/host1x/syncpt.c | 4
-rw-r--r--  drivers/video/fbdev/core/bitblit.c | 122
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 459
-rw-r--r--  drivers/video/fbdev/core/fbcon.h | 17
-rw-r--r--  drivers/video/fbdev/core/fbcon_ccw.c | 151
-rw-r--r--  drivers/video/fbdev/core/fbcon_cw.c | 151
-rw-r--r--  drivers/video/fbdev/core/fbcon_rotate.c | 47
-rw-r--r--  drivers/video/fbdev/core/fbcon_rotate.h | 18
-rw-r--r--  drivers/video/fbdev/core/fbcon_ud.c | 167
-rw-r--r--  drivers/video/fbdev/core/softcursor.c | 18
-rw-r--r--  drivers/video/fbdev/core/tileblit.c | 32
-rw-r--r--  include/drm/display/drm_dp.h | 3
-rw-r--r--  include/drm/display/drm_dp_helper.h | 8
-rw-r--r--  include/drm/drm_bridge.h | 61
-rw-r--r--  include/drm/drm_client.h | 1
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 2
-rw-r--r--  include/drm/gpu_scheduler.h | 2
-rw-r--r--  include/drm/ttm/ttm_bo.h | 2
-rw-r--r--  include/uapi/drm/ivpu_accel.h | 11
-rw-r--r--  include/uapi/drm/panfrost_drm.h | 50

99 files changed, 2631 insertions(+), 1271 deletions(-)
diff --git a/.clang-format b/.clang-format
index 48405c54ef27..d5c05db1a0d9 100644
--- a/.clang-format
+++ b/.clang-format
@@ -167,7 +167,7 @@ ForEachMacros:
- 'drm_connector_for_each_possible_encoder'
- 'drm_exec_for_each_locked_object'
- 'drm_exec_for_each_locked_object_reverse'
- - 'drm_for_each_bridge_in_chain'
+ - 'drm_for_each_bridge_in_chain_scoped'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_crtc_reverse'
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index 87c425e3d2b9..6e39c769bb6d 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -898,6 +898,12 @@ static int aie2_query_ctx_status_array(struct amdxdna_client *client,
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
+ XDNA_DBG(xdna, "Invalid element size %d or number of element %d",
+ args->element_size, args->num_element);
+ return -EINVAL;
+ }
+
array_args.element_size = min(args->element_size,
sizeof(struct amdxdna_drm_hwctx_entry));
array_args.buffer = args->buffer;
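
The hunk above rejects oversized user-supplied sizes outright, then clamps the per-element copy to the size of the kernel's own entry struct. A minimal standalone sketch of that validate-then-clamp pattern, with hypothetical names (this is not driver code):

#include <errno.h>
#include <stddef.h>

#define MAX_ELEM_SIZE	4096	/* stand-in for SZ_4K */
#define MAX_ELEM_COUNT	1024	/* stand-in for SZ_1K */

struct hwctx_entry { unsigned long long id, state; };	/* hypothetical */

/* Refuse absurd requests first, then copy at most what the kernel holds. */
static int validate_array_query(unsigned int element_size,
				unsigned int num_element, size_t *copy_size)
{
	if (element_size > MAX_ELEM_SIZE || num_element > MAX_ELEM_COUNT)
		return -EINVAL;

	*copy_size = element_size < sizeof(struct hwctx_entry) ?
		     element_size : sizeof(struct hwctx_entry);
	return 0;
}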
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index d407a36eb412..7f91863c3f24 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -392,35 +392,33 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
.vunmap = drm_gem_dmabuf_vunmap,
};
-static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
{
- struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
-
- iosys_map_clear(map);
-
- dma_resv_assert_held(obj->resv);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
+ int ret;
if (is_import_bo(abo))
- dma_buf_vmap(abo->dma_buf, map);
+ ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
else
- drm_gem_shmem_object_vmap(obj, map);
-
- if (!map->vaddr)
- return -ENOMEM;
+ ret = drm_gem_vmap(to_gobj(abo), &map);
- return 0;
+ *vaddr = map.vaddr;
+ return ret;
}
-static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo)
{
- struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+ struct iosys_map map;
+
+ if (!abo->mem.kva)
+ return;
- dma_resv_assert_held(obj->resv);
+ iosys_map_set_vaddr(&map, abo->mem.kva);
if (is_import_bo(abo))
- dma_buf_vunmap(abo->dma_buf, map);
+ dma_buf_vunmap_unlocked(abo->dma_buf, &map);
else
- drm_gem_shmem_object_vunmap(obj, map);
+ drm_gem_vunmap(to_gobj(abo), &map);
}
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
@@ -455,7 +453,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
@@ -468,7 +465,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
if (abo->type == AMDXDNA_BO_DEV_HEAP)
drm_mm_takedown(&abo->mm);
- drm_gem_vunmap(gobj, &map);
+ amdxdna_gem_obj_vunmap(abo);
mutex_destroy(&abo->lock);
if (is_import_bo(abo)) {
@@ -489,8 +486,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
- .vmap = amdxdna_gem_obj_vmap,
- .vunmap = amdxdna_gem_obj_vunmap,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
.mmap = amdxdna_gem_obj_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
.export = amdxdna_gem_prime_export,
@@ -663,7 +660,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_gem_obj *abo;
int ret;
@@ -692,12 +688,11 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
goto release_obj;
}
- abo->mem.kva = map.vaddr;
client->dev_heap = abo;
drm_gem_object_get(to_gobj(abo));
@@ -748,7 +743,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
struct drm_file *filp)
{
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_gem_obj *abo;
int ret;
@@ -770,12 +764,11 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
}
- abo->mem.kva = map.vaddr;
return abo;
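
The refactor above moves vmap/vunmap out of the GEM object callbacks (which run with the reservation lock already held, hence the removed dma_resv_assert_held()) into driver-internal helpers called with no locks held, which is why they switch to dma_buf_vmap_unlocked() and drm_gem_vmap(). A minimal sketch of the two locking contracts, with hypothetical wrapper names:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/iosys-map.h>

/* No-locks-held context: the _unlocked variant takes the resv itself. */
static int map_import_unlocked(struct dma_buf *buf, struct iosys_map *map)
{
	return dma_buf_vmap_unlocked(buf, map);
}

/* Callback context (e.g. drm_gem_object_funcs.vmap): resv already held. */
static int map_import_locked(struct dma_buf *buf, struct iosys_map *map)
{
	dma_resv_assert_held(buf->resv);
	return dma_buf_vmap(buf, map);
}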
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index cd24ccd20ba6..3bd85ee6c26b 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -398,35 +398,25 @@ static int dct_active_set(void *data, u64 active_percent)
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw,
+ int band, const char *name)
+{
+ seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n",
+ name,
+ hw->hws.grace_period[band],
+ hw->hws.process_grace_period[band],
+ hw->hws.process_quantum[band]);
+}
+
static int priority_bands_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
struct ivpu_hw_info *hw = vdev->hw;
- for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
- band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
- switch (band) {
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
- seq_puts(s, "Idle: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
- seq_puts(s, "Normal: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
- seq_puts(s, "Focus: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
- seq_puts(s, "Realtime: ");
- break;
- }
-
- seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
- hw->hws.grace_period[band], hw->hws.process_grace_period[band],
- hw->hws.process_quantum[band]);
- }
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime");
return 0;
}
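
The helper above replaces a switch over the band enum with one call per band. An equivalent table-driven form (a sketch; the bands[] array is hypothetical) would keep the function a single loop if more bands were added:

static const struct {
	int band;
	const char *name;
} bands[] = {
	{ VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE,     "Idle"     },
	{ VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL,   "Normal"   },
	{ VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS,    "Focus"    },
	{ VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime" },
};

static int priority_bands_show_alt(struct seq_file *s, struct ivpu_hw_info *hw)
{
	for (size_t i = 0; i < ARRAY_SIZE(bands); i++)
		print_priority_band(s, hw, bands[i].band, bands[i].name);
	return 0;
}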
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 3289751b4757..a08f99c3ba4a 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -200,6 +200,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_CAPABILITIES:
args->value = ivpu_is_capable(vdev, args->index);
break;
+ case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
+ args->value = ivpu_fw_preempt_buf_size(vdev);
+ break;
default:
ret = -EINVAL;
break;
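
The new GET_PARAM case exposes the combined primary + secondary preemption buffer size to user space. A hedged user-space sketch of querying it, assuming the DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE constant added to include/uapi/drm/ivpu_accel.h in this series:

#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

static int query_preempt_buf_size(int fd, unsigned long long *size)
{
	struct drm_ivpu_param args = {
		.param = DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE,
	};

	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args))
		return -1;

	/* 0 means the FW does not support mid-inference preemption. */
	*size = args.value;
	return 0;
}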
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 9db741695401..32f513499829 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -26,6 +26,8 @@
#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
+#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K
+#define FW_PREEMPT_BUF_MAX_SIZE SZ_32M
#define WATCHDOG_MSS_REDIRECT 32
#define WATCHDOG_NCE_REDIRECT 33
@@ -151,6 +153,47 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he
return VPU_SCHEDULING_MODE_HW;
}
+static void
+ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ u32 primary_preempt_buf_size, secondary_preempt_buf_size;
+
+ if (fw_hdr->preemption_buffer_1_max_size)
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
+ else
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+
+ if (fw_hdr->preemption_buffer_2_max_size)
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
+ else
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+
+ ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n",
+ primary_preempt_buf_size, secondary_preempt_buf_size);
+
+ if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE ||
+ secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) {
+ ivpu_warn(vdev, "Preemption buffers size too small\n");
+ return;
+ }
+
+ if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE ||
+ secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) {
+ ivpu_warn(vdev, "Preemption buffers size too big\n");
+ return;
+ }
+
+ if (fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ return;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
+ return;
+
+ vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE);
+ vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE);
+}
+
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
@@ -235,17 +278,9 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
- if (fw_hdr->preemption_buffer_1_max_size)
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
- else
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
-
- if (fw_hdr->preemption_buffer_2_max_size)
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
- else
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
- ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n",
- fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size);
+ ivpu_preemption_config_parse(vdev, fw_hdr);
+ ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n",
+ ivpu_fw_preempt_buf_size(vdev) ? "is" : "is not");
if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
fw_hdr->ro_section_size,
@@ -483,11 +518,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
- boot_params->global_memory_allocator_base);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
- boot_params->global_memory_allocator_size);
-
ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
boot_params->shave_nn_fw_base);
@@ -495,10 +525,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->watchdog_irq_mss);
ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
boot_params->watchdog_irq_nce);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
- boot_params->host_to_vpu_irq);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
- boot_params->job_done_irq);
ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
boot_params->host_version_id);
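
ivpu_preemption_config_parse() above prefers the newer *_max_size header fields when present, range-checks both sizes, and only then commits page-aligned values, leaving them zero (mid-inference preemption disabled) on any failure. A condensed sketch of that policy, with a hypothetical helper name:

#include <linux/mm.h>
#include <linux/sizes.h>

static unsigned int pick_preempt_size(unsigned int max_size, unsigned int size)
{
	unsigned int v = max_size ? max_size : size;	/* newer FW sets max_size */

	if (v < SZ_4K || v > SZ_32M)
		return 0;	/* out of range: leave MIP disabled */

	return ALIGN(v, PAGE_SIZE);	/* buffers are allocated page-aligned */
}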
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 7081913fb0dd..6fe2917abda6 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_FW_H__
@@ -52,4 +52,9 @@ static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point;
}
+static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev)
+{
+ return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size;
+}
+
#endif /* __IVPU_FW_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index aa8ff14f7aae..3ee996d503b2 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_GEM_H__
#define __IVPU_GEM_H__
@@ -96,4 +96,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
}
+static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_MAPPABLE;
+}
+
#endif /* __IVPU_GEM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 060f1fc031d3..044268d0fc87 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -34,22 +34,20 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
- u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
-
- if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
- ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
+ if (ivpu_fw_preempt_buf_size(vdev) == 0)
return 0;
cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
- primary_size, DRM_IVPU_BO_WC);
+ vdev->fw->primary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
if (!cmdq->primary_preempt_buf) {
ivpu_err(vdev, "Failed to create primary preemption buffer\n");
return -ENOMEM;
}
cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
- secondary_size, DRM_IVPU_BO_WC);
+ vdev->fw->secondary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
if (!cmdq->secondary_preempt_buf) {
ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
goto err_free_primary;
@@ -66,20 +64,39 @@ err_free_primary:
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
- return;
-
if (cmdq->primary_preempt_buf)
ivpu_bo_free(cmdq->primary_preempt_buf);
if (cmdq->secondary_preempt_buf)
ivpu_bo_free(cmdq->secondary_preempt_buf);
}
+static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv,
+ struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+{
+ int ret;
+
+ /* Use the preemption buffer provided by user space */
+ if (job->primary_preempt_buf)
+ return 0;
+
+ if (!cmdq->primary_preempt_buf) {
+ /* Allocate per command queue preemption buffers */
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+ return ret;
+ }
+
+ /* Use preemption buffers allocated by the kernel */
+ job->primary_preempt_buf = cmdq->primary_preempt_buf;
+ job->secondary_preempt_buf = cmdq->secondary_preempt_buf;
+
+ return 0;
+}
+
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq;
- int ret;
cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq->mem)
goto err_free_cmdq;
- ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
- if (ret)
- ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
-
return cmdq;
err_free_cmdq:
@@ -219,11 +232,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
- if (!ret)
+ if (!ret) {
ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
- else
+ } else {
xa_erase(&vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+ }
return ret;
}
@@ -427,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
- if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- if (cmdq->primary_preempt_buf) {
- entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
- entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
- }
+ if (job->primary_preempt_buf) {
+ entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr;
+ entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf);
+ }
- if (cmdq->secondary_preempt_buf) {
- entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
- entry->secondary_preempt_buf_size =
- ivpu_bo_size(cmdq->secondary_preempt_buf);
- }
+ if (job->secondary_preempt_buf) {
+ entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr;
+ entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf);
}
wmb(); /* Ensure that tail is updated after filling entry */
@@ -661,6 +673,13 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
goto err_unlock;
}
+ ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n",
+ job->job_id, ret);
+ goto err_unlock;
+ }
+
job->cmdq_id = cmdq->id;
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
@@ -714,7 +733,7 @@ err_unlock:
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
- u32 buf_count, u32 commands_offset)
+ u32 buf_count, u32 commands_offset, u32 preempt_buffer_index)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -750,6 +769,20 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
+ if (preempt_buffer_index) {
+ struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index];
+
+ if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) {
+ ivpu_warn(vdev, "Preemption buffer is too small\n");
+ return -EINVAL;
+ }
+ if (ivpu_bo_is_mappable(preempt_bo)) {
+ ivpu_warn(vdev, "Preemption buffer cannot be mappable\n");
+ return -EINVAL;
+ }
+ job->primary_preempt_buf = preempt_bo;
+ }
+
ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
&acquire_ctx);
if (ret) {
@@ -780,7 +813,7 @@ unlock_reservations:
static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
- u8 priority)
+ u32 preempt_buffer_index, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
@@ -812,7 +845,8 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
goto err_exit_dev;
}
- ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset,
+ preempt_buffer_index);
if (ret) {
ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
goto err_destroy_job;
@@ -866,7 +900,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
priority = ivpu_job_to_jsm_priority(args->priority);
return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
- (void __user *)args->buffers_ptr, args->commands_offset, priority);
+ (void __user *)args->buffers_ptr, args->commands_offset, 0, priority);
}
int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -883,6 +917,9 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
return -EINVAL;
+ if (args->preempt_buffer_index >= args->buffer_count)
+ return -EINVAL;
+
if (!IS_ALIGNED(args->commands_offset, 8))
return -EINVAL;
@@ -893,7 +930,8 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
return -EBADFD;
return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
- (void __user *)args->buffers_ptr, args->commands_offset, 0);
+ (void __user *)args->buffers_ptr, args->commands_offset,
+ args->preempt_buffer_index, 0);
}
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -1012,7 +1050,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
if (ivpu_jsm_reset_engine(vdev, 0))
- return;
+ goto runtime_put;
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
@@ -1036,7 +1074,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
goto runtime_put;
if (ivpu_jsm_hws_resume_engine(vdev, 0))
- return;
+ goto runtime_put;
/*
* In hardware scheduling mode NPU already has stopped processing jobs
* and won't send us any further notifications, thus we have to free job related resources
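
The two error-path changes above (return -> goto runtime_put) fix a leaked runtime-PM reference when the reset or resume request fails. A minimal sketch of the balanced-reference pattern, assuming the ivpu_rpm_get()/ivpu_rpm_put() helpers from ivpu_pm.h; do_reset() and do_more_work() are placeholders:

static void example_abort_work(struct ivpu_device *vdev)
{
	if (ivpu_rpm_get(vdev) < 0)
		return;			/* nothing acquired, bare return is fine */

	if (do_reset(vdev))
		goto runtime_put;	/* reference held: never bare-return here */

	do_more_work(vdev);

runtime_put:
	ivpu_rpm_put(vdev);
}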
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 2e301c2eea7b..6c8b9c739b51 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_JOB_H__
@@ -55,6 +55,8 @@ struct ivpu_job {
u32 job_id;
u32 engine_idx;
size_t bo_count;
+ struct ivpu_bo *primary_preempt_buf;
+ struct ivpu_bo *secondary_preempt_buf;
struct ivpu_bo *bos[] __counted_by(bo_count);
};
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 4b6b2b3d2583..de1b37ea1251 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -1,15 +1,16 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2020-2024, Intel Corporation.
+ * Copyright (c) 2020-2025, Intel Corporation.
+ */
+
+/**
+ * @addtogroup Jsm
+ * @{
*/
/**
* @file
* @brief JSM shared definitions
- *
- * @ingroup Jsm
- * @brief JSM shared definitions
- * @{
*/
#ifndef VPU_JSM_API_H
#define VPU_JSM_API_H
@@ -22,12 +23,12 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 29
+#define VPU_JSM_API_VER_MINOR 32
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_JSM_API_VER_PATCH 0
+#define VPU_JSM_API_VER_PATCH 5
/*
* Index in the API version table
@@ -71,9 +72,12 @@
#define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU
#define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU
#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
-/* Job status returned when the job was preempted mid-inference */
+/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */
#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+/* Job status returned when the job was preempted mid-command */
+#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU
#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU
+#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU
/*
* Host <-> VPU IPC channels.
@@ -134,11 +138,21 @@ enum {
* 2. Native fence queues are only supported on VPU 40xx onwards.
*/
VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
-
/*
* Enable turbo mode for testing NPU performance; not recommended for regular usage.
*/
- VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U)
+ VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U),
+ /*
+ * Queue error detection mode flag.
+ * For 'interactive' queues (this bit not set), the FW marks as in error, during the
+ * engine reset sequence, any queue that has not completed a job within the TDR timeout.
+ * For 'non-interactive' queues (this bit set), the FW marks as in error, during the
+ * engine reset sequence, any queue that has not progressed its heartbeat within the
+ * non-interactive no-progress timeout. An upper limit also applies to these queues:
+ * even if the heartbeat progresses, a queue running longer than the non-interactive
+ * timeout is marked as in error as well.
+ */
+ VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U)
};
/*
@@ -209,7 +223,7 @@ enum {
*/
#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2
-/*
+/**
* Job scheduling priority bands for both hardware scheduling and OS scheduling.
*/
enum vpu_job_scheduling_priority_band {
@@ -220,16 +234,16 @@ enum vpu_job_scheduling_priority_band {
VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
};
-/*
+/**
* Job format.
* Jobs defines the actual workloads to be executed by a given engine.
*/
struct vpu_job_queue_entry {
- /**< Address of VPU commands batch buffer */
+ /** Address of VPU commands batch buffer */
u64 batch_buf_addr;
- /**< Job ID */
+ /** Job ID */
u32 job_id;
- /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+ /** Flags bit field, see VPU_JOB_FLAGS_* above */
u32 flags;
/**
* Doorbell ring timestamp taken by KMD from SoC's global system clock, in
@@ -237,20 +251,20 @@ struct vpu_job_queue_entry {
* to match other profiling timestamps.
*/
u64 doorbell_timestamp;
- /**< Extra id for job tracking, used only in the firmware perf traces */
+ /** Extra id for job tracking, used only in the firmware perf traces */
u64 host_tracking_id;
- /**< Address of the primary preemption buffer to use for this job */
+ /** Address of the primary preemption buffer to use for this job */
u64 primary_preempt_buf_addr;
- /**< Size of the primary preemption buffer to use for this job */
+ /** Size of the primary preemption buffer to use for this job */
u32 primary_preempt_buf_size;
- /**< Size of secondary preemption buffer to use for this job */
+ /** Size of secondary preemption buffer to use for this job */
u32 secondary_preempt_buf_size;
- /**< Address of secondary preemption buffer to use for this job */
+ /** Address of secondary preemption buffer to use for this job */
u64 secondary_preempt_buf_addr;
u64 reserved_0;
};
-/*
+/**
* Inline command format.
* Inline commands are the commands executed at scheduler level (typically,
* synchronization directives). Inline command and job objects must be of
@@ -258,34 +272,36 @@ struct vpu_job_queue_entry {
*/
struct vpu_inline_cmd {
u64 reserved_0;
- /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
+ /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
u32 type;
- /* Flags bit field, see VPU_JOB_FLAGS_* above. */
+ /** Flags bit field, see VPU_JOB_FLAGS_* above. */
u32 flags;
- /* Inline command payload. Depends on inline command type. */
- union {
- /* Fence (wait and signal) commands' payload. */
- struct {
- /* Fence object handle. */
+ /** Inline command payload. Depends on inline command type. */
+ union payload {
+ /** Fence (wait and signal) commands' payload. */
+ struct fence {
+ /** Fence object handle. */
u64 fence_handle;
- /* User VA of the current fence value. */
+ /** User VA of the current fence value. */
u64 current_value_va;
- /* User VA of the monitored fence value (read-only). */
+ /** User VA of the monitored fence value (read-only). */
u64 monitored_value_va;
- /* Value to wait for or write in fence location. */
+ /** Value to wait for or write in fence location. */
u64 value;
- /* User VA of the log buffer in which to add log entry on completion. */
+ /** User VA of the log buffer in which to add log entry on completion. */
u64 log_buffer_va;
- /* NPU private data. */
+ /** NPU private data. */
u64 npu_private_data;
} fence;
- /* Other commands do not have a payload. */
- /* Payload definition for future inline commands can be inserted here. */
+ /**
+ * Other commands do not have a payload:
+ * Payload definition for future inline commands can be inserted here.
+ */
u64 reserved_1[6];
} payload;
};
-/*
+/**
* Job queue slots can be populated either with job objects or inline command objects.
*/
union vpu_jobq_slot {
@@ -293,7 +309,7 @@ union vpu_jobq_slot {
struct vpu_inline_cmd inline_cmd;
};
-/*
+/**
* Job queue control registers.
*/
struct vpu_job_queue_header {
@@ -301,18 +317,18 @@ struct vpu_job_queue_header {
u32 head;
u32 tail;
u32 flags;
- /* Set to 1 to indicate priority_band field is valid */
+ /** Set to 1 to indicate priority_band field is valid */
u32 priority_band_valid;
- /*
+ /**
* Priority for the work of this job queue, valid only if the HWS is NOT used
- * and the `priority_band_valid` is set to 1. It is applied only during
- * the VPU_JSM_MSG_REGISTER_DB message processing.
- * The device firmware might use the `priority_band` to optimize the power
+ * and the @ref priority_band_valid is set to 1. It is applied only during
+ * the @ref VPU_JSM_MSG_REGISTER_DB message processing.
+ * The device firmware might use the priority_band to optimize the power
* management logic, but it will not affect the order of jobs.
* Available priority bands: @see enum vpu_job_scheduling_priority_band
*/
u32 priority_band;
- /* Inside realtime band assigns a further priority, limited to 0..31 range */
+ /** Inside realtime band assigns a further priority, limited to 0..31 range */
u32 realtime_priority_level;
u32 reserved_0[9];
};
@@ -337,16 +353,16 @@ enum vpu_trace_entity_type {
VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2,
};
-/*
+/**
* HWS specific log buffer header details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_header {
- /* Written by VPU after adding a log entry. Initialised by host to 0. */
+ /** Written by VPU after adding a log entry. Initialised by host to 0. */
u32 first_free_entry_index;
- /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
+ /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
u32 wraparound_count;
- /*
+ /**
* This is the number of buffers that can be stored in the log buffer provided by the host.
* It is written by host before passing buffer to VPU. VPU should consider it read-only.
*/
@@ -354,14 +370,14 @@ struct vpu_hws_log_buffer_header {
u64 reserved[2];
};
-/*
+/**
* HWS specific log buffer entry details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_entry {
- /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
+ /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
u64 vpu_timestamp;
- /*
+ /**
* Operation type:
* 0 - context state change
* 1 - queue new work
@@ -371,7 +387,7 @@ struct vpu_hws_log_buffer_entry {
*/
u32 operation_type;
u32 reserved;
- /* Operation data depends on operation type */
+ /** Operation data depends on operation type */
u64 operation_data[2];
};
@@ -381,51 +397,54 @@ enum vpu_hws_native_fence_log_type {
VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
};
-/* HWS native fence log buffer header. */
+/** HWS native fence log buffer header. */
struct vpu_hws_native_fence_log_header {
union {
struct {
- /* Index of the first free entry in buffer. */
+ /** Index of the first free entry in buffer. */
u32 first_free_entry_idx;
- /* Incremented each time NPU wraps around the buffer to write next entry. */
+ /**
+ * Incremented each time NPU wraps around
+ * the buffer to write next entry.
+ */
u32 wraparound_count;
};
- /* Field allowing atomic update of both fields above. */
+ /** Field allowing atomic update of both fields above. */
u64 atomic_wraparound_and_entry_idx;
};
- /* Log buffer type, see enum vpu_hws_native_fence_log_type. */
+ /** Log buffer type, see enum vpu_hws_native_fence_log_type. */
u64 type;
- /* Allocated number of entries in the log buffer. */
+ /** Allocated number of entries in the log buffer. */
u64 entry_nb;
u64 reserved[2];
};
-/* Native fence log operation types. */
+/** Native fence log operation types. */
enum vpu_hws_native_fence_log_op {
VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
};
-/* HWS native fence log entry. */
+/** HWS native fence log entry. */
struct vpu_hws_native_fence_log_entry {
- /* Newly signaled/unblocked fence value. */
+ /** Newly signaled/unblocked fence value. */
u64 fence_value;
- /* Native fence object handle to which this operation belongs. */
+ /** Native fence object handle to which this operation belongs. */
u64 fence_handle;
- /* Operation type, see enum vpu_hws_native_fence_log_op. */
+ /** Operation type, see enum vpu_hws_native_fence_log_op. */
u64 op_type;
u64 reserved_0;
- /*
+ /**
* VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
* wait was started (in NPU SysTime).
*/
u64 fence_wait_start_ts;
u64 reserved_1;
- /* Timestamp at which fence operation was completed (in NPU SysTime). */
+ /** Timestamp at which fence operation was completed (in NPU SysTime). */
u64 fence_end_ts;
};
-/* Native fence log buffer. */
+/** Native fence log buffer. */
struct vpu_hws_native_fence_log_buffer {
struct vpu_hws_native_fence_log_header header;
struct vpu_hws_native_fence_log_entry entry[];
@@ -450,8 +469,21 @@ enum vpu_ipc_msg_type {
* after preemption or when resubmitting jobs to the queue.
*/
VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
+ /**
+ * OS scheduling doorbell register command
+ * @see vpu_ipc_msg_payload_register_db
+ */
VPU_JSM_MSG_REGISTER_DB = 0x1102,
+ /**
+ * OS scheduling doorbell unregister command
+ * @see vpu_ipc_msg_payload_unregister_db
+ */
VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
+ /**
+ * Query engine heartbeat. Heartbeat is expected to increase monotonically
+ * and increase while work is being progressed by NPU.
+ * @see vpu_ipc_msg_payload_query_engine_hb
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105,
VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106,
@@ -477,6 +509,7 @@ enum vpu_ipc_msg_type {
* aborted and removed from internal scheduling queues. All doorbells assigned
* to the host_ssid are unregistered and any internal FW resources belonging to
* the host_ssid are released.
+ * @see vpu_ipc_msg_payload_ssid_release
*/
VPU_JSM_MSG_SSID_RELEASE = 0x110e,
/**
@@ -504,26 +537,51 @@ enum vpu_ipc_msg_type {
* @see vpu_jsm_metric_streamer_start
*/
VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112,
- /** Control command: Priority band setup */
+ /**
+ * Control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113,
- /** Control command: Create command queue */
+ /**
+ * Control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114,
- /** Control command: Destroy command queue */
+ /**
+ * Control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115,
- /** Control command: Set context scheduling properties */
+ /**
+ * Control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116,
- /*
+ /**
* Register a doorbell to notify VPU of new work. The doorbell may later be
* deallocated or reassigned to another context.
+ * @see vpu_jsm_hws_register_db
*/
VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
- /** Control command: Log buffer setting */
+ /**
+ * Control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118,
- /* Control command: Suspend command queue. */
+ /**
+ * Control command: Suspend command queue.
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119,
- /* Control command: Resume command queue */
+ /**
+ * Control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a,
- /* Control command: Resume engine after reset */
+ /**
+ * Control command: Resume engine after reset
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b,
/* Control command: Enable survivability/DCT mode */
VPU_JSM_MSG_DCT_ENABLE = 0x111c,
@@ -540,7 +598,8 @@ enum vpu_ipc_msg_type {
VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
/**
* Control dyndbg behavior by executing a dyndbg command; equivalent to
- * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
+ * Linux command:
+ * @verbatim echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control @endverbatim
*/
VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
/**
@@ -550,15 +609,26 @@ enum vpu_ipc_msg_type {
/* IPC Device -> Host, Job completion */
VPU_JSM_MSG_JOB_DONE = 0x2100,
- /* IPC Device -> Host, Fence signalled */
+ /**
+ * IPC Device -> Host, Fence signalled
+ * @see vpu_ipc_msg_payload_native_fence_signalled
+ */
VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,
/* IPC Device -> Host, Async command completion */
VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
+ /**
+ * IPC Device -> Host, engine reset complete
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201,
VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202,
VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203,
+ /**
+ * Response to query engine heartbeat.
+ * @see vpu_ipc_msg_payload_query_engine_hb_done
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205,
VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206,
@@ -575,7 +645,10 @@ enum vpu_ipc_msg_type {
VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c,
/** Response to VPU_JSM_MSG_TRACE_GET_NAME. */
VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d,
- /** Response to VPU_JSM_MSG_SSID_RELEASE. */
+ /**
+ * Response to VPU_JSM_MSG_SSID_RELEASE.
+ * @see vpu_ipc_msg_payload_ssid_release
+ */
VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e,
/**
* Response to VPU_JSM_MSG_METRIC_STREAMER_START.
@@ -605,29 +678,56 @@ enum vpu_ipc_msg_type {
/**
* Asynchronous event sent from the VPU to the host either when the current
* metric buffer is full or when the VPU has collected a multiple of
- * @notify_sample_count samples as indicated through the start command
- * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected
- * metric data.
+ * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated
+ * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns
+ * information about collected metric data.
* @see vpu_jsm_metric_streamer_done
*/
VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213,
- /** Response to control command: Priority band setup */
+ /**
+ * Response to control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214,
- /** Response to control command: Create command queue */
+ /**
+ * Response to control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215,
- /** Response to control command: Destroy command queue */
+ /**
+ * Response to control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
- /** Response to control command: Set context scheduling properties */
+ /**
+ * Response to control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
- /** Response to control command: Log buffer setting */
+ /**
+ * Response to control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218,
- /* IPC Device -> Host, HWS notify index entry of log buffer written */
+ /**
+ * IPC Device -> Host, HWS notify index entry of log buffer written
+ * @see vpu_ipc_msg_payload_hws_scheduling_log_notification
+ */
VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219,
- /* IPC Device -> Host, HWS completion of a context suspend request */
+ /**
+ * IPC Device -> Host, HWS completion of a context suspend request
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a,
- /* Response to control command: Resume command queue */
+ /**
+ * Response to control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b,
- /* Response to control command: Resume engine command response */
+ /**
+ * Response to control command: Resume engine command response
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c,
/* Response to control command: Enable survivability/DCT mode */
VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d,
@@ -670,40 +770,44 @@ struct vpu_ipc_msg_payload_engine_preempt {
u32 preempt_id;
};
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
* This structure supports doorbell registration for only OS scheduling.
* @see VPU_JSM_MSG_REGISTER_DB
*/
struct vpu_ipc_msg_payload_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Virtual address in Global GTT pointing to the start of job queue. */
+ /** Virtual address in Global GTT pointing to the start of job queue. */
u64 jobq_base;
- /* Size of the job queue in bytes. */
+ /** Size of the job queue in bytes. */
u32 jobq_size;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
};
/**
- * @brief Unregister doorbell command structure.
+ * Unregister doorbell command structure.
* Request structure to unregister a doorbell for both HW and OS scheduling.
* @see VPU_JSM_MSG_UNREGISTER_DB
*/
struct vpu_ipc_msg_payload_unregister_db {
- /* Index of the doorbell to unregister. */
+ /** Index of the doorbell to unregister. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
+/**
+ * Heartbeat request structure
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB
+ */
struct vpu_ipc_msg_payload_query_engine_hb {
- /* Engine to return heartbeat value. */
+ /** Engine to return heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -723,10 +827,14 @@ struct vpu_ipc_msg_payload_power_level {
u32 reserved_0;
};
+/**
+ * Structure for requesting ssid release
+ * @see VPU_JSM_MSG_SSID_RELEASE
+ */
struct vpu_ipc_msg_payload_ssid_release {
- /* Host sub-stream ID for the context to be released. */
+ /** Host sub-stream ID for the context to be released. */
u32 host_ssid;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -752,7 +860,7 @@ struct vpu_jsm_metric_streamer_start {
u64 sampling_rate;
/**
* If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message
- * after every @notify_sample_count samples is collected or dropped by the VPU.
+ * after every @ref notify_sample_count samples is collected or dropped by the VPU.
* If set to UINT_MAX the VPU will only generate a notification when the metric
* buffer is full. If set to 0 the VPU will never generate a notification.
*/
@@ -762,9 +870,9 @@ struct vpu_jsm_metric_streamer_start {
* Address and size of the buffer where the VPU will write metric data. The
* VPU writes all counters from enabled metric groups one after another. If
* there is no space left to write data at the next sample period the VPU
- * will switch to the next buffer (@see next_buffer_addr) and will optionally
- * send a notification to the host driver if @notify_sample_count is non-zero.
- * If @next_buffer_addr is NULL the VPU will stop collecting metric data.
+ * will switch to the next buffer (@ref next_buffer_addr) and will optionally
+ * send a notification to the host driver if @ref notify_sample_count is non-zero.
+ * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data.
*/
u64 buffer_addr;
u64 buffer_size;
@@ -844,38 +952,47 @@ struct vpu_ipc_msg_payload_job_done {
u64 cmdq_id;
};
-/*
+/**
* Notification message upon native fence signalling.
* @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
*/
struct vpu_ipc_msg_payload_native_fence_signalled {
- /* Engine ID. */
+ /** Engine ID. */
u32 engine_idx;
- /* Host SSID. */
+ /** Host SSID. */
u32 host_ssid;
- /* CMDQ ID */
+ /** CMDQ ID */
u64 cmdq_id;
- /* Fence object handle. */
+ /** Fence object handle. */
u64 fence_handle;
};
+/**
+ * vpu_ipc_msg_payload_engine_reset_done contains an array of this structure,
+ * identifying which queues caused the reset, if the FW was able to detect an error.
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
struct vpu_jsm_engine_reset_context {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /* See VPU_ENGINE_RESET_CONTEXT_* defines */
+ /** See VPU_ENGINE_RESET_CONTEXT_* defines */
u64 flags;
};
+/**
+ * Engine reset response.
+ * @see VPU_JSM_MSG_ENGINE_RESET_DONE
+ */
struct vpu_ipc_msg_payload_engine_reset_done {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Number of impacted contexts */
+ /** Number of impacted contexts */
u32 num_impacted_contexts;
- /* Array of impacted command queue ids and their flags */
+ /** Array of impacted command queue ids and their flags */
struct vpu_jsm_engine_reset_context
impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS];
};
@@ -912,12 +1029,16 @@ struct vpu_ipc_msg_payload_unregister_db_done {
u32 reserved_0;
};
+/**
+ * Structure for heartbeat response
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE
+ */
struct vpu_ipc_msg_payload_query_engine_hb_done {
- /* Engine returning heartbeat value. */
+ /** Engine returning heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Heartbeat value. */
+ /** Heartbeat value. */
u64 heartbeat;
};
@@ -937,7 +1058,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
u8 power_limit[16];
};
-/* HWS priority band setup request / response */
+/**
+ * HWS priority band setup request / response
+ * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP
+ */
struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
* Grace period in 100ns units when preempting another priority band for
@@ -964,15 +1088,23 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* TDR timeout value in milliseconds. Default value of 0 meaning no timeout.
*/
u32 tdr_timeout;
+ /*
+ * Non-interactive queue timeout for no progress of heartbeat in milliseconds.
+ * Default value of 0 meaning no timeout.
+ */
+ u32 non_interactive_no_progress_timeout;
+ /*
+ * Non-interactive queue upper limit timeout value in milliseconds. Default
+ * value of 0 meaning no timeout.
+ */
+ u32 non_interactive_timeout;
};
-/*
+/**
* @brief HWS create command queue request.
* Host will create a command queue via this command.
* Note: Cmdq group is a handle of an object which
* may contain one or more command queues.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq {
/* Process id */
@@ -993,66 +1125,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq {
u32 reserved_0;
};
-/*
- * @brief HWS create command queue response.
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
+/**
+ * HWS create command queue response.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
- /* Process id */
+ /** Process id */
u64 process_id;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Engine for which queue is being created */
+ /** Engine for which queue is being created */
u32 engine_idx;
- /* Command queue group */
+ /** Command queue group */
u64 cmdq_group;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS destroy command queue request / response */
+/**
+ * HWS destroy command queue request / response
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP
+ */
struct vpu_ipc_msg_payload_hws_destroy_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS set context scheduling properties request / response */
+/**
+ * HWS set context scheduling properties request / response
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP
+ */
struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /*
+ /**
* Priority band to assign to work of this context.
* Available priority bands: @see enum vpu_job_scheduling_priority_band
*/
u32 priority_band;
- /* Inside realtime band assigns a further priority */
+ /** Inside realtime band assigns a further priority */
u32 realtime_priority_level;
- /* Priority relative to other contexts in the same process */
+ /** Priority relative to other contexts in the same process */
s32 in_process_priority;
- /* Zero padding / Reserved */
+ /** Zero padding / Reserved */
u32 reserved_1;
- /*
+ /**
* Context quantum relative to other contexts of same priority in the same process
* Minimum value supported by NPU is 1ms (10000 in 100ns units).
*/
u64 context_quantum;
- /* Grace period when preempting context of the same priority within the same process */
+ /** Grace period when preempting context of the same priority within the same process */
u64 grace_period_same_priority;
- /* Grace period when preempting context of a lower priority within the same process */
+ /** Grace period when preempting context of a lower priority within the same process */
u64 grace_period_lower_priority;
};
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
* This structure supports doorbell registration for both HW and OS scheduling.
* Note: Queue base and size are added here so that the same structure can be used for
* OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored
@@ -1061,27 +1200,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
* @see VPU_JSM_MSG_HWS_REGISTER_DB
*/
struct vpu_jsm_hws_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_id;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
- /* ID of the command queue associated with the doorbell. */
+ /** ID of the command queue associated with the doorbell. */
u64 cmdq_id;
- /* Virtual address pointing to the start of command queue. */
+ /** Virtual address pointing to the start of command queue. */
u64 cmdq_base;
- /* Size of the command queue in bytes. */
+ /** Size of the command queue in bytes. */
u64 cmdq_size;
};
-/*
- * @brief Structure to set another buffer to be used for scheduling-related logging.
+/**
+ * Structure to set another buffer to be used for scheduling-related logging.
* The size of the logging buffer and the number of entries is defined as part of the
* buffer itself as described next.
* The log buffer received from the host is made up of;
- * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'.
+ * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header.
* The header contains the number of log entries in the buffer.
* - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in
- * 'struct vpu_hws_log_buffer_entry'.
+ * @ref vpu_hws_log_buffer_entry.
* The entry contains the VPU timestamp, operation type and data.
* The host should provide the notify index value of log buffer to VPU. This is a
* value defined within the log buffer and when written to will generate the
@@ -1095,30 +1234,30 @@ struct vpu_jsm_hws_register_db {
* @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
*/
struct vpu_ipc_msg_payload_hws_set_scheduling_log {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /*
+ /**
* VPU log buffer virtual address.
* Set to 0 to disable logging for this engine.
*/
u64 vpu_log_buffer_va;
- /*
+ /**
* Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
* is generated when an event log is written to this index.
*/
u64 notify_index;
- /*
+ /**
	 * Field is now deprecated; it will be removed once the KMD is updated to support its removal
*/
u32 enable_extra_events;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
};
-/*
- * @brief The scheduling log notification is generated by VPU when it writes
+/**
+ * The scheduling log notification is generated by VPU when it writes
* an event into the log buffer at the notify_index. VPU notifies host with
* VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous
* message from VPU to host.
@@ -1126,14 +1265,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
* @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
*/
struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
};
-/*
- * @brief HWS suspend command queue request and done structure.
+/**
+ * HWS suspend command queue request and done structure.
 * Host will request the suspend of contexts and VPU will:
* - Suspend all work on this context
* - Preempt any running work
@@ -1152,21 +1291,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
* @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
*/
struct vpu_ipc_msg_payload_hws_suspend_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /*
+ /**
	 * Suspend fence value - reported by the VPU once the context
	 * suspend has completed.
*/
u64 suspend_fence_value;
};
-/*
- * @brief HWS Resume command queue request / response structure.
+/**
+ * HWS Resume command queue request / response structure.
 * Host will request the resume of a context:
* - VPU will resume all work on this context
* - Scheduler will allow this context to be scheduled
@@ -1174,25 +1313,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq {
* @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP
*/
struct vpu_ipc_msg_payload_hws_resume_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/*
- * @brief HWS Resume engine request / response structure.
- * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume.
+/**
+ * HWS Resume engine request / response structure.
+ * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume.
* Host shall send this command to resume scheduling of any valid queue.
- * @see VPU_JSM_MSG_HWS_RESUME_ENGINE
+ * @see VPU_JSM_MSG_HWS_ENGINE_RESUME
* @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE
*/
struct vpu_ipc_msg_payload_hws_resume_engine {
- /* Engine to be resumed */
+ /** Engine to be resumed */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -1326,7 +1465,7 @@ struct vpu_jsm_metric_streamer_done {
/**
* Metric group description placed in the metric buffer after successful completion
* of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
- * @vpu_jsm_metric_counter_descriptor records.
+ * @ref vpu_jsm_metric_counter_descriptor records.
* @see VPU_JSM_MSG_METRIC_STREAMER_INFO
*/
struct vpu_jsm_metric_group_descriptor {
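The vpu_jsm_api.h hunks above are mechanical: plain /* */ comments become /** */ doc comments, stray @brief tags are dropped, and textual references to other structures become @ref cross-references so a documentation generator can link them. A minimal sketch of the target style, on a hypothetical payload that is not part of the real API:

/**
 * Example request payload in the converted style.
 * @see VPU_JSM_MSG_EXAMPLE_REQ
 */
struct vpu_ipc_msg_payload_example {
	/** Engine ordinal */
	u32 engine_idx;
	/** Zero Padding */
	u32 reserved_0;
};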
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 1679c2c3d505..2767e5122cb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
+ ttm_bo_fini(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 5d272916e200..122502968927 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -683,11 +683,6 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int imx8qxp_ldb_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
static int imx8qxp_ldb_runtime_resume(struct device *dev)
{
struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev);
@@ -700,7 +695,7 @@ static int imx8qxp_ldb_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx8qxp_ldb_pm_ops = {
- RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL)
+ RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL)
};
static const struct of_device_id imx8qxp_ldb_dt_ids[] = {
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index baacd21e7341..a5bdd6c10643 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -137,10 +137,9 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
- struct drm_bridge *bridge;
/* Notify all bridges in the pipeline of hotplug events. */
- drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) {
if (bridge->funcs->hpd_notify)
bridge->funcs->hpd_notify(bridge, status);
}
@@ -639,7 +638,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_bridge_connector *bridge_connector;
struct drm_connector *connector;
struct i2c_adapter *ddc = NULL;
- struct drm_bridge *bridge, *panel_bridge = NULL;
+ struct drm_bridge *panel_bridge = NULL;
unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB);
unsigned int max_bpc = 8;
bool support_hdcp = false;
@@ -667,7 +666,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
* detection are available, we don't support hotplug detection at all.
*/
connector_type = DRM_MODE_CONNECTOR_Unknown;
- drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
if (!bridge->interlace_allowed)
connector->interlace_allowed = false;
if (!bridge->ycbcr_420_allowed)
@@ -818,7 +817,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
- bridge = bridge_connector->bridge_hdmi_cec;
+ struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;
ret = drmm_connector_hdmi_cec_notifier_register(connector,
NULL,
@@ -829,7 +828,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
- bridge = bridge_connector->bridge_hdmi_cec;
+ struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;
ret = drmm_connector_hdmi_cec_register(connector,
&drm_bridge_connector_hdmi_cec_funcs,
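The drm_bridge_connector changes replace open-coded iterator variables with drm_for_each_bridge_in_chain_scoped() and scope the CEC bridge pointer to the block that uses it. A sketch of the scoped pattern, assuming the macro declares the iteration variable itself and drops each bridge reference when the loop body ends:

static void notify_chain(struct drm_encoder *encoder,
			 enum drm_connector_status status)
{
	/* 'bridge' is declared by the macro; its reference is put
	 * automatically, so early exits cannot leak it. */
	drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
		if (bridge->funcs->hpd_notify)
			bridge->funcs->hpd_notify(bridge, status);
	}
}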
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 4aaeae4fa03c..5426db21e53f 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -123,6 +123,14 @@ bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS;
+}
+EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress);
+
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
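The new helper tests DP_POST_LT_ADJ_REQ_IN_PROGRESS in the DP_LANE_ALIGN_STATUS_UPDATED byte of a previously read link-status block. A hedged usage sketch, assuming the usual drm_dp_dpcd_read_link_status() convention of a negative return on error:

static bool sink_still_adjusting(struct drm_dp_aux *aux)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_link_status(aux, link_status) < 0)
		return false;

	/* The sink keeps this bit set while it fine-tunes drive
	 * settings after link training completes. */
	return drm_dp_post_lt_adj_req_in_progress(link_status);
}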
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index cd15cf52f0c9..ed5359a71f7e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1308,7 +1308,6 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_bridge_state *bridge_state;
- struct drm_bridge *bridge;
if (!encoder)
return 0;
@@ -1317,7 +1316,7 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
"Adding all bridges for [encoder:%d:%s] to %p\n",
encoder->base.id, encoder->name, state);
- drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
/* Skip bridges that don't implement the atomic state hooks. */
if (!bridge->funcs->atomic_duplicate_state)
continue;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index d031447eebc9..5f265e8b4d49 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -1121,7 +1121,6 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct drm_bridge_state *bridge_state, *next_bridge_state;
- struct drm_bridge *next_bridge;
u32 output_flags = 0;
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
@@ -1130,7 +1129,7 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
if (!bridge_state)
return;
- next_bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
/*
* Let's try to apply the most common case here, that is, propagate
@@ -1480,10 +1479,9 @@ static int encoder_bridges_show(struct seq_file *m, void *data)
{
struct drm_encoder *encoder = m->private;
struct drm_printer p = drm_seq_file_printer(m);
- struct drm_bridge *bridge;
unsigned int idx = 0;
- drm_for_each_bridge_in_chain(encoder, bridge)
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge)
drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
return 0;
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 9c2c3b0c8c47..fc4caf7da5fc 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -1293,6 +1293,50 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode)
}
EXPORT_SYMBOL(drm_client_modeset_dpms);
+/**
+ * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur
+ * @client: DRM client
+ * @crtc_index: The index of the CRTC to wait on
+ *
+ * Block the caller until the given CRTC has seen a VBLANK. Do nothing
+ * if the CRTC is disabled. If there's another DRM master present, fail
+ * with -EBUSY.
+ *
+ * Returns:
+ * 0 on success, or negative error code otherwise.
+ */
+int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ /*
+ * Rate-limit update frequency to vblank. If there's a DRM master
+ * present, it could interfere while we're waiting for the vblank
+ * event. Don't wait in this case.
+ */
+ if (!drm_master_internal_acquire(dev))
+ return -EBUSY;
+
+ crtc = client->modesets[crtc_index].crtc;
+
+ /*
+ * Only wait for a vblank event if the CRTC is enabled, otherwise
+ * just don't do anything, not even report an error.
+ */
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+
+ drm_master_internal_release(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank);
+
#ifdef CONFIG_DRM_KUNIT_TEST
#include "tests/drm_client_modeset_test.c"
#endif
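A sketch of how a DRM client might use the new helper to throttle flushes to the display refresh (hypothetical caller, not from this series):

static void client_flush_throttled(struct drm_client_dev *client)
{
	/* Rate-limit to vblank; ignore -EBUSY (another DRM master
	 * owns the device) and push the frame regardless. */
	drm_client_modeset_wait_for_vblank(client, 0);

	/* ... push the new frame ... */
}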
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 11a5b60cb9ce..53e9dc0543de 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -368,6 +368,10 @@ static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper)
unsigned long flags;
int ret;
+ mutex_lock(&helper->lock);
+ drm_client_modeset_wait_for_vblank(&helper->client, 0);
+ mutex_unlock(&helper->lock);
+
if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty))
return;
@@ -1068,15 +1072,9 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
int ret = 0;
- mutex_lock(&fb_helper->lock);
- if (!drm_master_internal_acquire(dev)) {
- ret = -EBUSY;
- goto unlock;
- }
+ guard(mutex)(&fb_helper->lock);
switch (cmd) {
case FBIO_WAITFORVSYNC:
@@ -1096,28 +1094,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
* make. If we're not smart enough here, one should
		 * just consider switching the userspace to KMS.
*/
- crtc = fb_helper->client.modesets[0].crtc;
-
- /*
- * Only wait for a vblank event if the CRTC is
- * enabled, otherwise just don't do anythintg,
- * not even report an error.
- */
- ret = drm_crtc_vblank_get(crtc);
- if (!ret) {
- drm_crtc_wait_one_vblank(crtc);
- drm_crtc_vblank_put(crtc);
- }
-
- ret = 0;
+ ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0);
break;
default:
ret = -ENOTTY;
}
- drm_master_internal_release(dev);
-unlock:
- mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
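The ioctl path above also switches from a manual mutex_lock()/mutex_unlock() pair to the scope-based guard() from <linux/cleanup.h>, which removes the unlock label. A minimal sketch of the pattern, with a stand-in condition:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int do_locked_op(struct mutex *lock, bool ready)
{
	guard(mutex)(lock);	/* unlocked automatically at scope exit */

	if (!ready)
		return -EBUSY;	/* no explicit unlock needed */

	/* ... work under the lock ... */
	return 0;
}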
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8d25cc65707d..cbeb76b2124f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -101,10 +101,8 @@ drm_gem_init(struct drm_device *dev)
vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
GFP_KERNEL);
- if (!vma_offset_manager) {
- DRM_ERROR("out of memory\n");
+ if (!vma_offset_manager)
return -ENOMEM;
- }
dev->vma_offset_manager = vma_offset_manager;
drm_vma_offset_manager_init(vma_offset_manager,
@@ -785,9 +783,10 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out)
{
- int ret;
- u32 *handles;
+ struct drm_device *dev = filp->minor->dev;
struct drm_gem_object **objs;
+ u32 *handles;
+ int ret;
if (!count)
return 0;
@@ -807,7 +806,7 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
ret = -EFAULT;
- DRM_DEBUG("Failed to copy in GEM handles\n");
+ drm_dbg_core(dev, "Failed to copy in GEM handles\n");
goto out;
}
@@ -855,12 +854,13 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout)
{
- long ret;
+ struct drm_device *dev = filep->minor->dev;
struct drm_gem_object *obj;
+ long ret;
obj = drm_gem_object_lookup(filep, handle);
if (!obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+ drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 4f0320df858f..a507cf517015 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -582,7 +582,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
if (ret) {
- DRM_ERROR("Failed to vmap PRIME buffer\n");
+ drm_err(dev, "Failed to vmap PRIME buffer\n");
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5d1349c34afd..50594cf8e17c 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -48,28 +48,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.vm_ops = &drm_gem_shmem_vm_ops,
};
-static struct drm_gem_shmem_object *
-__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
- struct vfsmount *gemfs)
+static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
+ size_t size, bool private, struct vfsmount *gemfs)
{
- struct drm_gem_shmem_object *shmem;
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- size = PAGE_ALIGN(size);
-
- if (dev->driver->gem_create_object) {
- obj = dev->driver->gem_create_object(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
- shmem = to_drm_gem_shmem_obj(obj);
- } else {
- shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!shmem)
- return ERR_PTR(-ENOMEM);
- obj = &shmem->base;
- }
-
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
@@ -81,7 +65,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
}
if (ret) {
drm_gem_private_object_fini(obj);
- goto err_free;
+ return ret;
}
ret = drm_gem_create_mmap_offset(obj);
@@ -102,14 +86,55 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
- return shmem;
-
+ return 0;
err_release:
drm_gem_object_release(obj);
-err_free:
- kfree(obj);
+ return ret;
+}
- return ERR_PTR(ret);
+/**
+ * drm_gem_shmem_init - Initialize an allocated shmem GEM object.
+ * @dev: DRM device
+ * @shmem: The shmem GEM object to initialize.
+ * @size: Size of the backing memory, in bytes.
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure.
+ */
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
+{
+ return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
+
+static struct drm_gem_shmem_object *
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
+ struct vfsmount *gemfs)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ size = PAGE_ALIGN(size);
+
+ if (dev->driver->gem_create_object) {
+ obj = dev->driver->gem_create_object(dev, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+ shmem = to_drm_gem_shmem_obj(obj);
+ } else {
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return ERR_PTR(-ENOMEM);
+ obj = &shmem->base;
+ }
+
+ ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
+ if (ret) {
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ return shmem;
}
/**
* drm_gem_shmem_create - Allocate an object with the given size
@@ -150,13 +175,13 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
/**
- * drm_gem_shmem_free - Free resources associated with a shmem GEM object
- * @shmem: shmem GEM object to free
+ * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
+ * @shmem: shmem GEM object
*
- * This function cleans up the GEM object state and frees the memory used to
- * store the object itself.
+ * This function cleans up the GEM object state, but does not free the memory used to store the
+ * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
*/
-void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
@@ -183,6 +208,19 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
drm_gem_object_release(obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_release);
+
+/**
+ * drm_gem_shmem_free - Free resources associated with a shmem GEM object
+ * @shmem: shmem GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+{
+ drm_gem_shmem_release(shmem);
kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
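Splitting drm_gem_shmem_init()/drm_gem_shmem_release() out of the create/free paths lets callers that manage their own allocation (such as the Rust GEM bindings) reuse the C logic. A hypothetical C user embedding the shmem object:

#include <drm/drm_gem_shmem_helper.h>

struct my_bo {
	struct drm_gem_shmem_object shmem;
	/* ... driver-private state ... */
};

static int my_bo_setup(struct drm_device *dev, struct my_bo *bo,
		       size_t size)
{
	return drm_gem_shmem_init(dev, &bo->shmem, size);
}

static void my_bo_teardown(struct my_bo *bo)
{
	/* Releases the GEM state but leaves freeing 'bo' itself
	 * to whoever allocated it. */
	drm_gem_shmem_release(&bo->shmem);
}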
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index b04cde4a60e7..90760d0ca071 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -107,7 +107,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
- /* We got here via ttm_bo_put(), which means that the
+ /* We got here via ttm_bo_fini(), which means that the
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
@@ -234,11 +234,11 @@ EXPORT_SYMBOL(drm_gem_vram_create);
* drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
* @gbo: the GEM VRAM object
*
- * See ttm_bo_put() for more information.
+ * See ttm_bo_fini() for more information.
*/
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
- ttm_bo_put(&gbo->bo);
+ ttm_bo_fini(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index 4a15695fa933..62e349b06dbe 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -561,11 +561,11 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn
continue; /* not a DRM property */
property = gud_connector_property_lookup(connector, prop);
- if (WARN_ON(IS_ERR(property)))
+ if (drm_WARN_ON(drm, IS_ERR(property)))
continue;
state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state);
- if (WARN_ON(IS_ERR(state_val)))
+ if (drm_WARN_ON(drm, IS_ERR(state_val)))
continue;
*state_val = val;
@@ -593,7 +593,7 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state,
unsigned int *state_val;
state_val = gud_connector_tv_state_val(prop, &connector_state->tv);
- if (WARN_ON_ONCE(IS_ERR(state_val)))
+	if (drm_WARN_ON_ONCE(connector_state->connector->dev, IS_ERR(state_val)))
return PTR_ERR(state_val);
val = *state_val;
@@ -667,7 +667,7 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
return ret;
}
- if (WARN_ON(connector->index != index))
+ if (drm_WARN_ON(drm, connector->index != index))
return -EINVAL;
if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS)
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 54d9aa9998e5..3a208e956dff 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -61,7 +61,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
size_t len;
void *buf;
- WARN_ON_ONCE(format->char_per_block[0] != 1);
+ drm_WARN_ON_ONCE(fb->dev, format->char_per_block[0] != 1);
/* Start on a byte boundary */
rect->x1 = ALIGN_DOWN(rect->x1, block_width);
@@ -138,7 +138,7 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
break;
default:
- WARN_ON_ONCE(1);
+ drm_WARN_ON_ONCE(fb->dev, 1);
return len;
}
@@ -527,7 +527,7 @@ int gud_plane_atomic_check(struct drm_plane *plane,
drm_connector_list_iter_end(&conn_iter);
}
- if (WARN_ON_ONCE(!connector_state))
+ if (drm_WARN_ON_ONCE(plane->dev, !connector_state))
return -ENOENT;
len = struct_size(req, properties,
@@ -539,7 +539,7 @@ int gud_plane_atomic_check(struct drm_plane *plane,
gud_from_display_mode(&req->mode, mode);
req->format = gud_from_fourcc(format->format);
- if (WARN_ON_ONCE(!req->format)) {
+ if (drm_WARN_ON_ONCE(plane->dev, !req->format)) {
ret = -EINVAL;
goto out;
}
@@ -561,7 +561,7 @@ int gud_plane_atomic_check(struct drm_plane *plane,
val = new_plane_state->rotation;
break;
default:
- WARN_ON_ONCE(1);
+ drm_WARN_ON_ONCE(plane->dev, 1);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 1f4814968868..57bb111d65da 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1029,7 +1029,7 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!obj->ttm.created);
- ttm_bo_put(i915_gem_to_ttm(obj));
+ ttm_bo_fini(i915_gem_to_ttm(obj));
}
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -1325,7 +1325,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
- * Similarly, in delayed_destroy, we can't call ttm_bo_put()
+ * Similarly, in delayed_destroy, we can't call ttm_bo_fini()
* until successful initialization.
*/
ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
index c5629e155d25..63f23b821b0b 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
@@ -368,17 +368,20 @@ static unsigned long clk_tve_di_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long clk_tve_di_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_tve_di_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- div = *prate / rate;
+ div = req->best_parent_rate / req->rate;
if (div >= 4)
- return *prate / 4;
+ req->rate = req->best_parent_rate / 4;
else if (div >= 2)
- return *prate / 2;
- return *prate;
+ req->rate = req->best_parent_rate / 2;
+ else
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -409,7 +412,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
}
static const struct clk_ops clk_tve_di_ops = {
- .round_rate = clk_tve_di_round_rate,
+ .determine_rate = clk_tve_di_determine_rate,
.set_rate = clk_tve_di_set_rate,
.recalc_rate = clk_tve_di_recalc_rate,
};
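The clk_tve_di conversion above follows the framework-wide migration from the deprecated .round_rate clk op, which returned the rounded rate, to a .determine_rate op that rewrites the rate inside the struct clk_rate_request it is handed (the mcde hunk below does the same). A sketch of the pattern for a hypothetical 1/2/4 divider:

static int my_clk_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	unsigned long div;

	if (!req->rate)
		return -EINVAL;

	div = req->best_parent_rate / req->rate;

	/* Snap to the nearest divider the hardware supports. */
	if (div >= 4)
		req->rate = req->best_parent_rate / 4;
	else if (div >= 2)
		req->rate = req->best_parent_rate / 2;
	else
		req->rate = req->best_parent_rate;

	return 0;
}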
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 6d8325c76697..dfdeb926fe9c 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -134,10 +134,10 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct drm_display_info *di = &conn_state->connector->display_info;
struct drm_bridge_state *next_bridge_state = NULL;
- struct drm_bridge *next_bridge;
u32 bus_flags, bus_fmt;
- next_bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
+
if (next_bridge)
next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
next_bridge);
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c
index a720d8f53209..22d0eced95da 100644
--- a/drivers/gpu/drm/loongson/lsdc_gem.c
+++ b/drivers/gpu/drm/loongson/lsdc_gem.c
@@ -57,7 +57,7 @@ static void lsdc_gem_object_free(struct drm_gem_object *obj)
struct ttm_buffer_object *tbo = to_ttm_bo(obj);
if (tbo)
- ttm_bo_put(tbo);
+ ttm_bo_fini(tbo);
}
static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
diff --git a/drivers/gpu/drm/mcde/mcde_clk_div.c b/drivers/gpu/drm/mcde/mcde_clk_div.c
index 3056ac566473..8c5af2677357 100644
--- a/drivers/gpu/drm/mcde/mcde_clk_div.c
+++ b/drivers/gpu/drm/mcde/mcde_clk_div.c
@@ -71,12 +71,15 @@ static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
return best_div;
}
-static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int mcde_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int div = mcde_clk_div_choose_div(hw, rate, prate, true);
+ int div = mcde_clk_div_choose_div(hw, req->rate,
+ &req->best_parent_rate, true);
- return DIV_ROUND_UP_ULL(*prate, div);
+ req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);
+
+ return 0;
}
static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw,
@@ -132,7 +135,7 @@ static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops mcde_clk_div_ops = {
.enable = mcde_clk_div_enable,
.recalc_rate = mcde_clk_div_recalc_rate,
- .round_rate = mcde_clk_div_round_rate,
+ .determine_rate = mcde_clk_div_determine_rate,
.set_rate = mcde_clk_div_set_rate,
};
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index c88776d1e784..3b5757aed9c8 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -28,6 +28,7 @@ config DRM_NOUVEAU
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
select SND_HDA_COMPONENT if SND_HDA_CORE
+ select PM_DEVFREQ if ARCH_TEGRA
help
Choose this option for open-source NVIDIA support.
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 22f74fc88cd7..57bc542780bb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -9,6 +9,8 @@ struct nvkm_device_tegra {
struct nvkm_device device;
struct platform_device *pdev;
+ void __iomem *regs;
+
struct reset_control *rst;
struct clk *clk;
struct clk *clk_ref;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index d5d8877064a7..6a09d397c651 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -134,4 +134,5 @@ int gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gp10b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index d59fd12268b9..6c26beeb427f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -57,7 +57,7 @@ nouveau_bo(struct ttm_buffer_object *bo)
static inline void
nouveau_bo_fini(struct nouveau_bo *bo)
{
- ttm_bo_put(&bo->bo);
+ ttm_bo_fini(&bo->bo);
}
extern struct ttm_device_funcs nouveau_bo_driver;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 690e10fbf0bd..395d92ab6271 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -87,7 +87,7 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
}
- ttm_bo_put(&nvbo->bo);
+ ttm_bo_fini(&nvbo->bo);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 8d5853deeee4..9fd351273236 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -21,6 +21,8 @@
*/
#include "nouveau_platform.h"
+#include <nvkm/subdev/clk/gk20a_devfreq.h>
+
static int nouveau_platform_probe(struct platform_device *pdev)
{
const struct nvkm_device_tegra_func *func;
@@ -40,6 +42,21 @@ static void nouveau_platform_remove(struct platform_device *pdev)
nouveau_drm_device_remove(drm);
}
+#ifdef CONFIG_PM_SLEEP
+static int nouveau_platform_suspend(struct device *dev)
+{
+ return gk20a_devfreq_suspend(dev);
+}
+
+static int nouveau_platform_resume(struct device *dev)
+{
+ return gk20a_devfreq_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(nouveau_pm_ops, nouveau_platform_suspend,
+ nouveau_platform_resume);
+#endif
+
#if IS_ENABLED(CONFIG_OF)
static const struct nvkm_device_tegra_func gk20a_platform_data = {
.iommu_bit = 34,
@@ -81,6 +98,9 @@ struct platform_driver nouveau_platform_driver = {
.driver = {
.name = "nouveau",
.of_match_table = of_match_ptr(nouveau_platform_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &nouveau_pm_ops,
+#endif
},
.probe = nouveau_platform_probe,
.remove = nouveau_platform_remove,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 3375a59ebf1a..2517b65d8faa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2280,6 +2280,7 @@ nv13b_chipset = {
.acr = { 0x00000001, gp10b_acr_new },
.bar = { 0x00000001, gm20b_bar_new },
.bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gp10b_clk_new },
.fault = { 0x00000001, gp10b_fault_new },
.fb = { 0x00000001, gp10b_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 114e50ca1827..03aa6f09ec89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -259,6 +259,10 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->func = func;
tdev->pdev = pdev;
+ tdev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tdev->regs))
+ return PTR_ERR(tdev->regs);
+
if (func->require_vdd) {
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(tdev->vdd)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index dcecd499d8df..be8f3283ee16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -10,6 +10,8 @@ nvkm-y += nvkm/subdev/clk/gf100.o
nvkm-y += nvkm/subdev/clk/gk104.o
nvkm-y += nvkm/subdev/clk/gk20a.o
nvkm-y += nvkm/subdev/clk/gm20b.o
+nvkm-y += nvkm/subdev/clk/gp10b.o
+nvkm-$(CONFIG_PM_DEVFREQ) += nvkm/subdev/clk/gk20a_devfreq.o
nvkm-y += nvkm/subdev/clk/pllnv04.o
nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index d573fb0917fc..65f5d0f1f3bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -23,6 +23,7 @@
*
*/
#include "priv.h"
+#include "gk20a_devfreq.h"
#include "gk20a.h"
#include <core/tegra.h>
@@ -589,6 +590,10 @@ gk20a_clk_init(struct nvkm_clk *base)
return ret;
}
+ ret = gk20a_devfreq_init(base, &clk->devfreq);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
index 286413ff4a9e..ea5b0bab4cce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -118,6 +118,7 @@ struct gk20a_clk {
const struct gk20a_clk_pllg_params *params;
struct gk20a_pll pll;
u32 parent_rate;
+ struct gk20a_devfreq *devfreq;
u32 (*div_to_pl)(u32);
u32 (*pl_to_div)(u32);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c
new file mode 100644
index 000000000000..41003cbcdbfa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: MIT
+#include <linux/clk.h>
+#include <linux/math64.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+
+#include <drm/drm_managed.h>
+
+#include <subdev/clk.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_chan.h"
+#include "priv.h"
+#include "gk20a_devfreq.h"
+#include "gk20a.h"
+#include "gp10b.h"
+
+#define PMU_BUSY_CYCLES_NORM_MAX 1000U
+
+#define PWR_PMU_IDLE_COUNTER_TOTAL 0U
+#define PWR_PMU_IDLE_COUNTER_BUSY 4U
+
+#define PWR_PMU_IDLE_COUNT_REG_OFFSET 0x0010A508U
+#define PWR_PMU_IDLE_COUNT_REG_SIZE 16U
+#define PWR_PMU_IDLE_COUNT_MASK 0x7FFFFFFFU
+#define PWR_PMU_IDLE_COUNT_RESET_VALUE (0x1U << 31U)
+
+#define PWR_PMU_IDLE_INTR_REG_OFFSET 0x0010A9E8U
+#define PWR_PMU_IDLE_INTR_ENABLE_VALUE 0U
+
+#define PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET 0x0010A9ECU
+#define PWR_PMU_IDLE_INTR_STATUS_MASK 0x00000001U
+#define PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE 0x1U
+
+#define PWR_PMU_IDLE_THRESHOLD_REG_OFFSET 0x0010A8A0U
+#define PWR_PMU_IDLE_THRESHOLD_REG_SIZE 4U
+#define PWR_PMU_IDLE_THRESHOLD_MAX_VALUE 0x7FFFFFFFU
+
+#define PWR_PMU_IDLE_CTRL_REG_OFFSET 0x0010A50CU
+#define PWR_PMU_IDLE_CTRL_REG_SIZE 16U
+#define PWR_PMU_IDLE_CTRL_VALUE_MASK 0x3U
+#define PWR_PMU_IDLE_CTRL_VALUE_BUSY 0x2U
+#define PWR_PMU_IDLE_CTRL_VALUE_ALWAYS 0x3U
+#define PWR_PMU_IDLE_CTRL_FILTER_MASK (0x1U << 2)
+#define PWR_PMU_IDLE_CTRL_FILTER_DISABLED 0x0U
+
+#define PWR_PMU_IDLE_MASK_REG_OFFSET 0x0010A504U
+#define PWR_PMU_IDLE_MASK_REG_SIZE 16U
+#define PWM_PMU_IDLE_MASK_GR_ENABLED 0x1U
+#define PWM_PMU_IDLE_MASK_CE_2_ENABLED 0x200000U
+
+/**
+ * struct gk20a_devfreq - Device frequency management
+ */
+struct gk20a_devfreq {
+ /** @devfreq: devfreq device. */
+ struct devfreq *devfreq;
+
+ /** @regs: Device registers. */
+ void __iomem *regs;
+
+ /** @gov_data: Governor data. */
+ struct devfreq_simple_ondemand_data gov_data;
+
+ /** @busy_time: Busy time. */
+ ktime_t busy_time;
+
+ /** @total_time: Total time. */
+ ktime_t total_time;
+
+ /** @time_last_update: Last update time. */
+ ktime_t time_last_update;
+};
+
+static struct gk20a_devfreq *dev_to_gk20a_devfreq(struct device *dev)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
+ struct nvkm_clk *base = nvkm_clk(subdev);
+
+ switch (drm->nvkm->chipset) {
+	case 0x13b: return gp10b_clk(base)->devfreq;
+	default: return gk20a_clk(base)->devfreq;
+ }
+}
+
+static void gk20a_pmu_init_perfmon_counter(struct gk20a_devfreq *gdevfreq)
+{
+ u32 data;
+
+ // Set pmu idle intr status bit on total counter overflow
+ writel(PWR_PMU_IDLE_INTR_ENABLE_VALUE,
+ gdevfreq->regs + PWR_PMU_IDLE_INTR_REG_OFFSET);
+
+ writel(PWR_PMU_IDLE_THRESHOLD_MAX_VALUE,
+ gdevfreq->regs + PWR_PMU_IDLE_THRESHOLD_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_THRESHOLD_REG_SIZE));
+
+ // Setup counter for total cycles
+ data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));
+ data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
+ data |= PWR_PMU_IDLE_CTRL_VALUE_ALWAYS | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
+ writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));
+
+ // Setup counter for busy cycles
+ writel(PWM_PMU_IDLE_MASK_GR_ENABLED | PWM_PMU_IDLE_MASK_CE_2_ENABLED,
+ gdevfreq->regs + PWR_PMU_IDLE_MASK_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_MASK_REG_SIZE));
+
+ data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
+ data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
+ data |= PWR_PMU_IDLE_CTRL_VALUE_BUSY | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
+ writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
+}
+
+static u32 gk20a_pmu_read_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
+{
+ u32 ret;
+
+ ret = readl(gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
+ (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));
+
+ return ret & PWR_PMU_IDLE_COUNT_MASK;
+}
+
+static void gk20a_pmu_reset_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
+{
+ writel(PWR_PMU_IDLE_COUNT_RESET_VALUE, gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
+ (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));
+}
+
+static u32 gk20a_pmu_read_idle_intr_status(struct gk20a_devfreq *gdevfreq)
+{
+ u32 ret;
+
+ ret = readl(gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);
+
+ return ret & PWR_PMU_IDLE_INTR_STATUS_MASK;
+}
+
+static void gk20a_pmu_clear_idle_intr_status(struct gk20a_devfreq *gdevfreq)
+{
+ writel(PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE,
+ gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);
+}
+
+static void gk20a_devfreq_update_utilization(struct gk20a_devfreq *gdevfreq)
+{
+ ktime_t now, last;
+ u64 busy_cycles, total_cycles;
+ u32 norm, intr_status;
+
+ now = ktime_get();
+ last = gdevfreq->time_last_update;
+ gdevfreq->total_time = ktime_us_delta(now, last);
+
+ busy_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
+ total_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
+ intr_status = gk20a_pmu_read_idle_intr_status(gdevfreq);
+
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
+
+ if (intr_status != 0UL) {
+ norm = PMU_BUSY_CYCLES_NORM_MAX;
+ gk20a_pmu_clear_idle_intr_status(gdevfreq);
+ } else if (total_cycles == 0ULL || busy_cycles > total_cycles) {
+ norm = PMU_BUSY_CYCLES_NORM_MAX;
+ } else {
+ norm = (u32)div64_u64(busy_cycles * PMU_BUSY_CYCLES_NORM_MAX,
+ total_cycles);
+ }
+
+ gdevfreq->busy_time = div_u64(gdevfreq->total_time * norm, PMU_BUSY_CYCLES_NORM_MAX);
+ gdevfreq->time_last_update = now;
+}
+
+static int gk20a_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
+ struct nvkm_clk *base = nvkm_clk(subdev);
+ struct nvkm_pstate *pstates = base->func->pstates;
+ int nr_pstates = base->func->nr_pstates;
+ int i, ret;
+
+ for (i = 0; i < nr_pstates - 1; i++)
+ if (pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV >= *freq)
+ break;
+
+ ret = nvkm_clk_ustate(base, pstates[i].pstate, 0);
+ ret |= nvkm_clk_ustate(base, pstates[i].pstate, 1);
+ if (ret) {
+ nvkm_error(subdev, "cannot update clock\n");
+ return ret;
+ }
+
+ *freq = pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static int gk20a_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
+ struct nvkm_clk *base = nvkm_clk(subdev);
+
+ *freq = nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static void gk20a_devfreq_reset(struct gk20a_devfreq *gdevfreq)
+{
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
+ gk20a_pmu_clear_idle_intr_status(gdevfreq);
+
+ gdevfreq->busy_time = 0;
+ gdevfreq->total_time = 0;
+ gdevfreq->time_last_update = ktime_get();
+}
+
+static int gk20a_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);
+
+ gk20a_devfreq_get_cur_freq(dev, &status->current_frequency);
+
+ gk20a_devfreq_update_utilization(gdevfreq);
+
+ status->busy_time = ktime_to_ns(gdevfreq->busy_time);
+ status->total_time = ktime_to_ns(gdevfreq->total_time);
+
+ gk20a_devfreq_reset(gdevfreq);
+
+ NV_DEBUG(drm, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ status->busy_time, status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile gk20a_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .polling_ms = 50,
+ .target = gk20a_devfreq_target,
+ .get_cur_freq = gk20a_devfreq_get_cur_freq,
+ .get_dev_status = gk20a_devfreq_get_dev_status,
+};
+
+int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **gdevfreq)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct nouveau_drm *drm = dev_get_drvdata(device->dev);
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct nvkm_pstate *pstates = base->func->pstates;
+ int nr_pstates = base->func->nr_pstates;
+ struct gk20a_devfreq *new_gdevfreq;
+ int i;
+
+ new_gdevfreq = drmm_kzalloc(drm->dev, sizeof(struct gk20a_devfreq), GFP_KERNEL);
+ if (!new_gdevfreq)
+ return -ENOMEM;
+
+ new_gdevfreq->regs = tdev->regs;
+
+ for (i = 0; i < nr_pstates; i++)
+ dev_pm_opp_add(base->subdev.device->dev,
+ pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV, 0);
+
+ gk20a_pmu_init_perfmon_counter(new_gdevfreq);
+ gk20a_devfreq_reset(new_gdevfreq);
+
+ gk20a_devfreq_profile.initial_freq =
+ nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;
+
+ new_gdevfreq->gov_data.upthreshold = 45;
+ new_gdevfreq->gov_data.downdifferential = 5;
+
+ new_gdevfreq->devfreq = devm_devfreq_add_device(device->dev,
+ &gk20a_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &new_gdevfreq->gov_data);
+ if (IS_ERR(new_gdevfreq->devfreq))
+ return PTR_ERR(new_gdevfreq->devfreq);
+
+ *gdevfreq = new_gdevfreq;
+
+ return 0;
+}
+
+int gk20a_devfreq_resume(struct device *dev)
+{
+ struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);
+
+ if (!gdevfreq || !gdevfreq->devfreq)
+ return 0;
+
+ return devfreq_resume_device(gdevfreq->devfreq);
+}
+
+int gk20a_devfreq_suspend(struct device *dev)
+{
+ struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);
+
+ if (!gdevfreq || !gdevfreq->devfreq)
+ return 0;
+
+ return devfreq_suspend_device(gdevfreq->devfreq);
+}
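gk20a_devfreq_update_utilization() converts the PMU's busy/total cycle counters into the time pair devfreq expects: busy cycles are normalized into [0, PMU_BUSY_CYCLES_NORM_MAX] and the wall-clock window is scaled by that ratio. A worked example with illustrative numbers:

/*
 * busy_cycles = 250, total_cycles = 1000, window = 50000 us
 *
 * norm      = 250 * 1000 / 1000  = 250
 * busy_time = 50000 * 250 / 1000 = 12500 us  (25% load)
 *
 * Counter overflow (intr_status set) or busy > total clamps
 * norm to PMU_BUSY_CYCLES_NORM_MAX, i.e. 100% load.
 */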
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h
new file mode 100644
index 000000000000..5b7ca8a7a5cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __GK20A_DEVFREQ_H__
+#define __GK20A_DEVFREQ_H__
+
+#include <linux/devfreq.h>
+
+struct gk20a_devfreq;
+
+#if defined(CONFIG_PM_DEVFREQ)
+int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq);
+
+int gk20a_devfreq_resume(struct device *dev);
+int gk20a_devfreq_suspend(struct device *dev);
+#else
+static inline int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq)
+{
+ return 0;
+}
+
+static inline int gk20a_devfreq_resume(struct device *dev) { return 0; }
+static inline int gk20a_devfreq_suspend(struct device *dev) { return 0; }
+#endif /* CONFIG_PM_DEVFREQ */
+
+#endif /* __GK20A_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index 7c33542f651b..fa8ca53acbd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -27,6 +27,7 @@
#include <core/tegra.h>
#include "priv.h"
+#include "gk20a_devfreq.h"
#include "gk20a.h"
#define GPCPLL_CFG_SYNC_MODE BIT(2)
@@ -869,6 +870,10 @@ gm20b_clk_init(struct nvkm_clk *base)
return ret;
}
+ ret = gk20a_devfreq_init(base, &clk->devfreq);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c
new file mode 100644
index 000000000000..492b62c0ee96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: MIT
+#include <subdev/clk.h>
+#include <subdev/timer.h>
+#include <core/device.h>
+#include <core/tegra.h>
+
+#include "priv.h"
+#include "gk20a_devfreq.h"
+#include "gk20a.h"
+#include "gp10b.h"
+
+static int
+gp10b_clk_init(struct nvkm_clk *base)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ int ret;
+
+ /* Start with the highest frequency, matching the BPMP default */
+ base->func->calc(base, &base->func->pstates[base->func->nr_pstates - 1].base);
+ ret = base->func->prog(base);
+ if (ret) {
+ nvkm_error(subdev, "cannot initialize clock\n");
+ return ret;
+ }
+
+ ret = gk20a_devfreq_init(base, &clk->devfreq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+gp10b_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+
+ switch (src) {
+ case nv_clk_src_gpc:
+ return clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV;
+ default:
+ nvkm_error(subdev, "invalid clock source %d\n", src);
+ return -EINVAL;
+ }
+}
+
+static int
+gp10b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ u32 target_rate = cstate->domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV;
+
+ clk->new_rate = clk_round_rate(clk->clk, target_rate) / GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static int
+gp10b_clk_prog(struct nvkm_clk *base)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ int ret;
+
+ ret = clk_set_rate(clk->clk, clk->new_rate * GK20A_CLK_GPC_MDIV);
+ if (ret < 0)
+ return ret;
+
+ clk->rate = clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static struct nvkm_pstate
+gp10b_pstates[] = {
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 114750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 216750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 318750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 420750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 522750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 624750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 726750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 828750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 930750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1032750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1134750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1236750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1300500,
+ },
+ },
+};
+
+static const struct nvkm_clk_func
+gp10b_clk = {
+ .init = gp10b_clk_init,
+ .read = gp10b_clk_read,
+ .calc = gp10b_clk_calc,
+ .prog = gp10b_clk_prog,
+ .tidy = gk20a_clk_tidy,
+ .pstates = gp10b_pstates,
+ .nr_pstates = ARRAY_SIZE(gp10b_pstates),
+ .domains = {
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max }
+ }
+};
+
+int
+gp10b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
+{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ const struct nvkm_clk_func *func = &gp10b_clk;
+ struct gp10b_clk *clk;
+ int ret, i;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+ *pclk = &clk->base;
+ clk->clk = tdev->clk;
+
+ /* Finish initializing the pstates */
+ for (i = 0; i < func->nr_pstates; i++) {
+ INIT_LIST_HEAD(&func->pstates[i].list);
+ func->pstates[i].pstate = i + 1;
+ }
+
+ ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h
new file mode 100644
index 000000000000..178e3bcdbbf7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_CLK_GP10B_H__
+#define __NVKM_CLK_GP10B_H__
+
+struct gp10b_clk {
+ /* currently applied parameters */
+ struct nvkm_clk base;
+ struct gk20a_devfreq *devfreq;
+ struct clk *clk;
+ u32 rate;
+
+ /* new parameters to apply */
+ u32 new_rate;
+};
+
+#define gp10b_clk(p) container_of((p), struct gp10b_clk, base)
+
+#endif
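The gp10b_clk() macro is the usual container_of() downcast from the embedded nvkm_clk back to the wrapper structure. A sketch with a hypothetical helper, valid only when the pointer really addresses the embedded member:

static u32 example_current_rate(struct nvkm_clk *base)
{
	/* Recover the wrapper from its embedded 'base' field. */
	struct gp10b_clk *clk = gp10b_clk(base);

	return clk->rate;
}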
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 4dd05bc732da..195715b162e3 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -77,7 +77,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct omap_dss_device *output = omap_encoder->output;
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
- struct drm_bridge *bridge;
struct videomode vm = { 0 };
u32 bus_flags;
@@ -97,8 +96,7 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
- for (bridge = output->bridge; bridge;
- bridge = drm_bridge_get_next_bridge(bridge)) {
+ drm_for_each_bridge_in_chain_from(output->bridge, bridge) {
if (!bridge->timings)
continue;
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 62435e3cd9f4..ad6b03b59b52 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1909,6 +1909,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xb7a9, &delay_200_500_e50, "B140HAK03.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xcdba, &delay_200_500_e50, "B140UAX01.2"),
@@ -1974,6 +1975,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf2, &delay_200_500_e200, "NV156FHM-N4S"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"),
@@ -2007,10 +2009,12 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1441, &delay_200_500_e80_d50, "N140JCA-ELK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x148f, &delay_200_500_e80, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1565, &delay_200_500_e80, "N156HCA-EAB"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x7402, &delay_200_500_e200_d50, "N116BCA-EAK"),
@@ -2022,10 +2026,12 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x143f, &delay_200_500_e50, "MNE007QS3-6"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x144b, &delay_200_500_e80, "MNE001BS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x146e, &delay_80_500_e50_d50, "MNE007QB3-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1519, &delay_200_500_e80_d50, "MNF601BS1-3"),
EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
@@ -2046,6 +2052,8 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1212, &delay_200_500_e50, "KD116N0930A16"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1707, &delay_200_150_e50, "KD116N2130B12"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x0110, &delay_200_500_e50, "KD116N3730A07"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x0397, &delay_200_500_e50, "KD116N3730A12"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 077525a3ad68..1e73efad02a8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -10,11 +10,13 @@
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
+#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
#include <drm/gpu_scheduler.h>
#include "panfrost_devfreq.h"
+#include "panfrost_job.h"
struct panfrost_device;
struct panfrost_mmu;
@@ -22,7 +24,6 @@ struct panfrost_job_slot;
struct panfrost_job;
struct panfrost_perfcnt;
-#define NUM_JOB_SLOTS 3
#define MAX_PM_DOMAINS 5
enum panfrost_drv_comp_bits {
@@ -206,13 +207,19 @@ struct panfrost_engine_usage {
struct panfrost_file_priv {
struct panfrost_device *pfdev;
- struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
+ struct xarray jm_ctxs;
struct panfrost_mmu *mmu;
struct panfrost_engine_usage engine_usage;
};
+static inline bool panfrost_high_prio_allowed(struct drm_file *file)
+{
+ /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
+ return (capable(CAP_SYS_NICE) || drm_is_current_master(file));
+}
+
static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
{
return ddev->dev_private;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1ea6c509a5d5..22350ce8a08f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -109,6 +109,14 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
#endif
break;
+ case DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES:
+ param->value = BIT(PANFROST_JM_CTX_PRIORITY_LOW) |
+ BIT(PANFROST_JM_CTX_PRIORITY_MEDIUM);
+
+ if (panfrost_high_prio_allowed(file))
+ param->value |= BIT(PANFROST_JM_CTX_PRIORITY_HIGH);
+ break;
+
default:
return -EINVAL;
}
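
Userspace can probe this bitmask before deciding which context priority to request. A minimal sketch, assuming the PANFROST_JM_CTX_PRIORITY_* uapi constants this series adds and a libdrm-style drmIoctl() wrapper:

	struct drm_panfrost_get_param get = {
		.param = DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES,
	};

	/* On success, value is a bitmask of PANFROST_JM_CTX_PRIORITY_* bits. */
	if (drmIoctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get) == 0 &&
	    (get.value & (1 << PANFROST_JM_CTX_PRIORITY_HIGH))) {
		/* This client may create HIGH-priority JM contexts. */
	}
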
@@ -279,9 +287,13 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct panfrost_file_priv *file_priv = file->driver_priv;
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
+ struct panfrost_jm_ctx *jm_ctx;
struct panfrost_job *job;
int ret = 0, slot;
+ if (args->pad)
+ return -EINVAL;
+
if (!args->jc)
return -EINVAL;
@@ -294,10 +306,16 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
return -ENODEV;
}
+ jm_ctx = panfrost_jm_ctx_from_handle(file, args->jm_ctx_handle);
+ if (!jm_ctx) {
+ ret = -EINVAL;
+ goto out_put_syncout;
+ }
+
job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job) {
ret = -ENOMEM;
- goto out_put_syncout;
+ goto out_put_jm_ctx;
}
kref_init(&job->refcount);
@@ -307,12 +325,13 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
job->mmu = file_priv->mmu;
+ job->ctx = panfrost_jm_ctx_get(jm_ctx);
job->engine_usage = &file_priv->engine_usage;
slot = panfrost_job_get_slot(job);
ret = drm_sched_job_init(&job->base,
- &file_priv->sched_entity[slot],
+ &jm_ctx->slot_entity[slot],
1, NULL, file->client_id);
if (ret)
goto out_put_job;
@@ -338,6 +357,8 @@ out_cleanup_job:
drm_sched_job_cleanup(&job->base);
out_put_job:
panfrost_job_put(job);
+out_put_jm_ctx:
+ panfrost_jm_ctx_put(jm_ctx);
out_put_syncout:
if (sync_out)
drm_syncobj_put(sync_out);
@@ -536,6 +557,27 @@ err_put_obj:
return ret;
}
+static int panfrost_ioctl_jm_ctx_create(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return panfrost_jm_ctx_create(file, data);
+}
+
+static int panfrost_ioctl_jm_ctx_destroy(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ const struct drm_panfrost_jm_ctx_destroy *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ /* We can't destroy the default context created when the file is opened. */
+ if (!args->handle)
+ return -EINVAL;
+
+ return panfrost_jm_ctx_destroy(file, args->handle);
+}
+
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -564,7 +606,7 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
goto err_free;
}
- ret = panfrost_job_open(panfrost_priv);
+ ret = panfrost_job_open(file);
if (ret)
goto err_job;
@@ -583,7 +625,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
struct panfrost_file_priv *panfrost_priv = file->driver_priv;
panfrost_perfcnt_close(file);
- panfrost_job_close(panfrost_priv);
+ panfrost_job_close(file);
panfrost_mmu_ctx_put(panfrost_priv->mmu);
kfree(panfrost_priv);
@@ -603,6 +645,8 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(JM_CTX_CREATE, jm_ctx_create, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(JM_CTX_DESTROY, jm_ctx_destroy, DRM_RENDER_ALLOW),
};
static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
@@ -672,6 +716,47 @@ static int panthor_gems_show(struct seq_file *m, void *data)
return 0;
}
+static void show_panfrost_jm_ctx(struct panfrost_jm_ctx *jm_ctx, u32 handle,
+ struct seq_file *m)
+{
+ struct drm_device *ddev = ((struct drm_info_node *)m->private)->minor->dev;
+ const char *prio = "UNKNOWN";
+
+ static const char * const prios[] = {
+ [DRM_SCHED_PRIORITY_HIGH] = "HIGH",
+ [DRM_SCHED_PRIORITY_NORMAL] = "NORMAL",
+ [DRM_SCHED_PRIORITY_LOW] = "LOW",
+ };
+
+ if (jm_ctx->slot_entity[0].priority !=
+ jm_ctx->slot_entity[1].priority)
+ drm_warn(ddev, "Slot priorities should be the same in a single context\n");
+
+ if (jm_ctx->slot_entity[0].priority < ARRAY_SIZE(prios))
+ prio = prios[jm_ctx->slot_entity[0].priority];
+
+ seq_printf(m, " JM context %u: priority %s\n", handle, prio);
+}
+
+static int show_file_jm_ctxs(struct panfrost_file_priv *pfile,
+ struct seq_file *m)
+{
+ struct panfrost_jm_ctx *jm_ctx;
+ unsigned long i;
+
+ xa_lock(&pfile->jm_ctxs);
+ xa_for_each(&pfile->jm_ctxs, i, jm_ctx) {
+ jm_ctx = panfrost_jm_ctx_get(jm_ctx);
+ xa_unlock(&pfile->jm_ctxs);
+ show_panfrost_jm_ctx(jm_ctx, i, m);
+ panfrost_jm_ctx_put(jm_ctx);
+ xa_lock(&pfile->jm_ctxs);
+ }
+ xa_unlock(&pfile->jm_ctxs);
+
+ return 0;
+}
+
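
The loop above follows a common xarray idiom: pin the object with a reference while the lock is held, drop the lock for work that may sleep (seq_printf() can allocate), then re-take it before advancing; xa_for_each() resumes from the current index, so dropping the lock mid-iteration is safe. Distilled, with hypothetical obj/obj_get/obj_put names:

	xa_lock(&xa);
	xa_for_each(&xa, index, obj) {
		obj = obj_get(obj);	/* pin before dropping the lock */
		xa_unlock(&xa);
		do_sleeping_work(obj);
		obj_put(obj);
		xa_lock(&xa);		/* xa_for_each() resumes from 'index' */
	}
	xa_unlock(&xa);
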
static struct drm_info_list panthor_debugfs_list[] = {
{"gems", panthor_gems_show, 0, NULL},
};
@@ -685,9 +770,64 @@ static int panthor_gems_debugfs_init(struct drm_minor *minor)
return 0;
}
+static int show_each_file(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *ddev = node->minor->dev;
+ int (*show)(struct panfrost_file_priv *, struct seq_file *) =
+ node->info_ent->data;
+ struct drm_file *file;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ddev->filelist_mutex);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(file, &ddev->filelist, lhead) {
+ struct task_struct *task;
+ struct panfrost_file_priv *pfile = file->driver_priv;
+ struct pid *pid;
+
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct that called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+ seq_printf(m, "client_id %8llu pid %8d command %s:\n",
+ file->client_id, pid_nr(pid),
+ task ? task->comm : "<unknown>");
+ rcu_read_unlock();
+
+ ret = show(pfile, m);
+ if (ret < 0)
+ break;
+
+ seq_puts(m, "\n");
+ }
+
+ mutex_unlock(&ddev->filelist_mutex);
+ return ret;
+}
+
+static struct drm_info_list panfrost_sched_debugfs_list[] = {
+ { "sched_ctxs", show_each_file, 0, show_file_jm_ctxs },
+};
+
+static void panfrost_sched_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panfrost_sched_debugfs_list,
+ ARRAY_SIZE(panfrost_sched_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
static void panfrost_debugfs_init(struct drm_minor *minor)
{
panthor_gems_debugfs_init(minor);
+ panfrost_sched_debugfs_init(minor);
}
#endif
@@ -699,6 +839,8 @@ static void panfrost_debugfs_init(struct drm_minor *minor)
* - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
* - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
* - 1.4 - adds SET_LABEL_BO
+ * - 1.5 - adds JM_CTX_{CREATE,DESTROY} ioctls and extends SUBMIT to take a
+ *         JM context handle, so jobs can be submitted to contexts created
+ *         with configurable priorities/affinity
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -711,7 +853,7 @@ static const struct drm_driver panfrost_drm_driver = {
.name = "panfrost",
.desc = "panfrost DRM",
.major = 1,
- .minor = 4,
+ .minor = 5,
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
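
End to end, a client of the new 1.5 interface might drive these ioctls roughly as follows; a sketch with error handling elided, where job_chain_gpu_va is a placeholder and the struct/field names follow the uapi this series adds:

	struct drm_panfrost_jm_ctx_create ctx = {
		.priority = PANFROST_JM_CTX_PRIORITY_HIGH, /* needs CAP_SYS_NICE or DRM_MASTER */
	};
	drmIoctl(fd, DRM_IOCTL_PANFROST_JM_CTX_CREATE, &ctx);

	struct drm_panfrost_submit submit = {
		.jc = job_chain_gpu_va,
		.jm_ctx_handle = ctx.handle,	/* 0 selects the default context */
	};
	drmIoctl(fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);

	struct drm_panfrost_jm_ctx_destroy destroy = { .handle = ctx.handle };
	drmIoctl(fd, DRM_IOCTL_PANFROST_JM_CTX_DESTROY, &destroy);
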
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 82acabb21b27..c47d14eabbae 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -22,6 +22,7 @@
#include "panfrost_mmu.h"
#include "panfrost_dump.h"
+#define MAX_JM_CTX_PER_FILE 64
#define JOB_TIMEOUT_MS 500
#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
@@ -359,6 +360,7 @@ static void panfrost_job_cleanup(struct kref *ref)
kvfree(job->bos);
}
+ panfrost_jm_ctx_put(job->ctx);
kfree(job);
}
@@ -383,6 +385,9 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
int slot = panfrost_job_get_slot(job);
struct dma_fence *fence = NULL;
+ if (job->ctx->destroyed)
+ return ERR_PTR(-ECANCELED);
+
if (unlikely(job->base.s_fence->finished.error))
return NULL;
@@ -917,39 +922,176 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
destroy_workqueue(pfdev->reset.wq);
}
-int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
+int panfrost_job_open(struct drm_file *file)
+{
+ struct panfrost_file_priv *panfrost_priv = file->driver_priv;
+ int ret;
+
+ struct drm_panfrost_jm_ctx_create default_jm_ctx = {
+ .priority = PANFROST_JM_CTX_PRIORITY_MEDIUM,
+ };
+
+ xa_init_flags(&panfrost_priv->jm_ctxs, XA_FLAGS_ALLOC);
+
+ ret = panfrost_jm_ctx_create(file, &default_jm_ctx);
+ if (ret)
+ return ret;
+
+ /* We expect the default context to be assigned handle 0. */
+ if (WARN_ON(default_jm_ctx.handle))
+ return -EINVAL;
+
+ return 0;
+}
+
+void panfrost_job_close(struct drm_file *file)
+{
+ struct panfrost_file_priv *panfrost_priv = file->driver_priv;
+ struct panfrost_jm_ctx *jm_ctx;
+ unsigned long i;
+
+ xa_for_each(&panfrost_priv->jm_ctxs, i, jm_ctx)
+ panfrost_jm_ctx_destroy(file, i);
+
+ xa_destroy(&panfrost_priv->jm_ctxs);
+}
+
+int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
- struct panfrost_device *pfdev = panfrost_priv->pfdev;
struct panfrost_job_slot *js = pfdev->js;
- struct drm_gpu_scheduler *sched;
- int ret, i;
+ int i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
- sched = &js->queue[i].sched;
- ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
- DRM_SCHED_PRIORITY_NORMAL, &sched,
- 1, NULL);
- if (WARN_ON(ret))
- return ret;
+ /* If there are any jobs in the HW queue, we're not idle */
+ if (atomic_read(&js->queue[i].sched.credit_count))
+ return false;
+ }
+
+ return true;
+}
+
+static void panfrost_jm_ctx_release(struct kref *kref)
+{
+ struct panfrost_jm_ctx *jm_ctx = container_of(kref, struct panfrost_jm_ctx, refcnt);
+
+ WARN_ON(!jm_ctx->destroyed);
+
+ for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++)
+ drm_sched_entity_destroy(&jm_ctx->slot_entity[i]);
+
+ kfree(jm_ctx);
+}
+
+void
+panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx)
+{
+ if (jm_ctx)
+ kref_put(&jm_ctx->refcnt, panfrost_jm_ctx_release);
+}
+
+struct panfrost_jm_ctx *
+panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx)
+{
+ if (jm_ctx)
+ kref_get(&jm_ctx->refcnt);
+
+ return jm_ctx;
+}
+
+struct panfrost_jm_ctx *
+panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle)
+{
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_jm_ctx *jm_ctx;
+
+ xa_lock(&priv->jm_ctxs);
+ jm_ctx = panfrost_jm_ctx_get(xa_load(&priv->jm_ctxs, handle));
+ xa_unlock(&priv->jm_ctxs);
+
+ return jm_ctx;
+}
+
+static int jm_ctx_prio_to_drm_sched_prio(struct drm_file *file,
+ enum drm_panfrost_jm_ctx_priority in,
+ enum drm_sched_priority *out)
+{
+ switch (in) {
+ case PANFROST_JM_CTX_PRIORITY_LOW:
+ *out = DRM_SCHED_PRIORITY_LOW;
+ return 0;
+ case PANFROST_JM_CTX_PRIORITY_MEDIUM:
+ *out = DRM_SCHED_PRIORITY_NORMAL;
+ return 0;
+ case PANFROST_JM_CTX_PRIORITY_HIGH:
+ if (!panfrost_high_prio_allowed(file))
+ return -EACCES;
+
+ *out = DRM_SCHED_PRIORITY_HIGH;
+ return 0;
+ default:
+ return -EINVAL;
}
+}
+
+int panfrost_jm_ctx_create(struct drm_file *file,
+ struct drm_panfrost_jm_ctx_create *args)
+{
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_device *pfdev = priv->pfdev;
+ enum drm_sched_priority sched_prio;
+ struct panfrost_jm_ctx *jm_ctx;
+ int ret;
+
+ jm_ctx = kzalloc(sizeof(*jm_ctx), GFP_KERNEL);
+ if (!jm_ctx)
+ return -ENOMEM;
+
+ kref_init(&jm_ctx->refcnt);
+
+ ret = jm_ctx_prio_to_drm_sched_prio(file, args->priority, &sched_prio);
+ if (ret)
+ goto err_put_jm_ctx;
+
+ for (u32 i = 0; i < NUM_JOB_SLOTS; i++) {
+ struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
+
+ ret = drm_sched_entity_init(&jm_ctx->slot_entity[i], sched_prio,
+ &sched, 1, NULL);
+ if (ret)
+ goto err_put_jm_ctx;
+ }
+
+ ret = xa_alloc(&priv->jm_ctxs, &args->handle, jm_ctx,
+ XA_LIMIT(0, MAX_JM_CTX_PER_FILE), GFP_KERNEL);
+ if (ret)
+ goto err_put_jm_ctx;
+
return 0;
+
+err_put_jm_ctx:
+ jm_ctx->destroyed = true;
+ panfrost_jm_ctx_put(jm_ctx);
+ return ret;
}
-void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
+int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle)
{
- struct panfrost_device *pfdev = panfrost_priv->pfdev;
- int i;
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_device *pfdev = priv->pfdev;
+ struct panfrost_jm_ctx *jm_ctx;
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
+ jm_ctx = xa_erase(&priv->jm_ctxs, handle);
+ if (!jm_ctx)
+ return -EINVAL;
+
+ jm_ctx->destroyed = true;
/* Kill in-flight jobs */
spin_lock(&pfdev->js->job_lock);
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
- int j;
+ for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++) {
+ struct drm_sched_entity *entity = &jm_ctx->slot_entity[i];
- for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
+ for (int j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
struct panfrost_job *job = pfdev->jobs[i][j];
u32 cmd;
@@ -980,18 +1122,7 @@ void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
}
}
spin_unlock(&pfdev->js->job_lock);
-}
-
-int panfrost_job_is_idle(struct panfrost_device *pfdev)
-{
- struct panfrost_job_slot *js = pfdev->js;
- int i;
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- /* If there are any jobs in the HW queue, we're not idle */
- if (atomic_read(&js->queue[i].sched.credit_count))
- return false;
- }
-
- return true;
+ panfrost_jm_ctx_put(jm_ctx);
+ return 0;
}
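
One way to read the teardown above: the xarray owns one reference to each context and every in-flight job owns another, so destruction can safely race with submission. In outline:

	/* Submit path: handle -> context, taking a reference under xa_lock. */
	jm_ctx = panfrost_jm_ctx_from_handle(file, handle);

	/* Destroy path: drop the xarray's reference and flag the context. */
	jm_ctx = xa_erase(&priv->jm_ctxs, handle);
	jm_ctx->destroyed = true;	/* panfrost_job_run() now returns -ECANCELED */
	panfrost_jm_ctx_put(jm_ctx);	/* entities freed once the last job drops its ref */
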
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index ec581b97852b..5a30ff1503c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -18,6 +18,7 @@ struct panfrost_job {
struct panfrost_device *pfdev;
struct panfrost_mmu *mmu;
+ struct panfrost_jm_ctx *ctx;
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
@@ -39,10 +40,30 @@ struct panfrost_job {
u64 start_cycles;
};
+struct panfrost_js_ctx {
+ struct drm_sched_entity sched_entity;
+ bool enabled;
+};
+
+#define NUM_JOB_SLOTS 3
+
+struct panfrost_jm_ctx {
+ struct kref refcnt;
+ bool destroyed;
+ struct drm_sched_entity slot_entity[NUM_JOB_SLOTS];
+};
+
+int panfrost_jm_ctx_create(struct drm_file *file,
+ struct drm_panfrost_jm_ctx_create *args);
+int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle);
+void panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx);
+struct panfrost_jm_ctx *panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx);
+struct panfrost_jm_ctx *panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle);
+
int panfrost_job_init(struct panfrost_device *pfdev);
void panfrost_job_fini(struct panfrost_device *pfdev);
-int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
-void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+int panfrost_job_open(struct drm_file *file);
+void panfrost_job_close(struct drm_file *file);
int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 4c202fc5ce05..fdbe89ef7f43 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1105,7 +1105,7 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
if (ret)
goto out;
- ret = panthor_group_create(pfile, args, queue_args);
+ ret = panthor_group_create(pfile, args, queue_args, file->client_id);
if (ret < 0)
goto out;
args->group_handle = ret;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index ba5dc3e443d9..0cc9055f4ee5 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -360,6 +360,9 @@ struct panthor_queue {
/** @entity: DRM scheduling entity used for this queue. */
struct drm_sched_entity entity;
+ /** @name: DRM scheduler name for this queue. */
+ char *name;
+
/**
* @remaining_time: Time remaining before the job timeout expires.
*
@@ -901,6 +904,8 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
if (queue->scheduler.ops)
drm_sched_fini(&queue->scheduler);
+ kfree(queue->name);
+
panthor_queue_put_syncwait_obj(queue);
panthor_kernel_bo_destroy(queue->ringbuf);
@@ -1412,7 +1417,7 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
fault = cs_iface->output->fault;
info = cs_iface->output->fault_info;
- if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
+ if (queue) {
u64 cs_extract = queue->iface.output->extract;
struct panthor_job *job;
@@ -3308,9 +3313,10 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
static struct panthor_queue *
group_create_queue(struct panthor_group *group,
- const struct drm_panthor_queue_create *args)
+ const struct drm_panthor_queue_create *args,
+ u64 drm_client_id, u32 gid, u32 qid)
{
- const struct drm_sched_init_args sched_args = {
+ struct drm_sched_init_args sched_args = {
.ops = &panthor_queue_sched_ops,
.submit_wq = group->ptdev->scheduler->wq,
.num_rqs = 1,
@@ -3323,7 +3329,6 @@ group_create_queue(struct panthor_group *group,
.credit_limit = args->ringbuf_size / sizeof(u64),
.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
.timeout_wq = group->ptdev->reset.wq,
- .name = "panthor-queue",
.dev = group->ptdev->base.dev,
};
struct drm_gpu_scheduler *drm_sched;
@@ -3398,6 +3403,15 @@ group_create_queue(struct panthor_group *group,
if (ret)
goto err_free_queue;
+ /* assign a unique name */
+ queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid);
+ if (!queue->name) {
+ ret = -ENOMEM;
+ goto err_free_queue;
+ }
+
+ sched_args.name = queue->name;
+
ret = drm_sched_init(&queue->scheduler, &sched_args);
if (ret)
goto err_free_queue;
@@ -3447,7 +3461,8 @@ static void add_group_kbo_sizes(struct panthor_device *ptdev,
int panthor_group_create(struct panthor_file *pfile,
const struct drm_panthor_group_create *group_args,
- const struct drm_panthor_queue_create *queue_args)
+ const struct drm_panthor_queue_create *queue_args,
+ u64 drm_client_id)
{
struct panthor_device *ptdev = pfile->ptdev;
struct panthor_group_pool *gpool = pfile->groups;
@@ -3540,12 +3555,16 @@ int panthor_group_create(struct panthor_file *pfile,
memset(group->syncobjs->kmap, 0,
group_args->queues.count * sizeof(struct panthor_syncobj_64b));
+ ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
+ if (ret)
+ goto err_put_group;
+
for (i = 0; i < group_args->queues.count; i++) {
- group->queues[i] = group_create_queue(group, &queue_args[i]);
+ group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i);
if (IS_ERR(group->queues[i])) {
ret = PTR_ERR(group->queues[i]);
group->queues[i] = NULL;
- goto err_put_group;
+ goto err_erase_gid;
}
group->queue_count++;
@@ -3553,10 +3572,6 @@ int panthor_group_create(struct panthor_file *pfile,
group->idle_queues = GENMASK(group->queue_count - 1, 0);
- ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
- if (ret)
- goto err_put_group;
-
mutex_lock(&sched->reset.lock);
if (atomic_read(&sched->reset.in_progress)) {
panthor_group_stop(group);
@@ -3575,6 +3590,9 @@ int panthor_group_create(struct panthor_file *pfile,
return gid;
+err_erase_gid:
+ xa_erase(&gpool->xa, gid);
+
err_put_group:
group_put(group);
return ret;
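
Note the ordering constraint this hunk introduces: the group id is now allocated before the queues are created, because group_create_queue() embeds it in the scheduler name, and the error path gains err_erase_gid to undo the allocation. The name's lifetime follows the usual drm_sched rule:

	/* The scheduler keeps the name pointer, so it must outlive the
	 * scheduler: names look like "panthor-queue-<client>-<gid>-<qid>",
	 * and group_free_queue() kfree()s queue->name only after
	 * drm_sched_fini() has torn the scheduler down. */
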
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index 742b0b4ff3a3..f4a475aa34c0 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -21,7 +21,8 @@ struct panthor_job;
int panthor_group_create(struct panthor_file *pfile,
const struct drm_panthor_group_create *group_args,
- const struct drm_panthor_queue_create *queue_args);
+ const struct drm_panthor_queue_create *queue_args,
+ u64 drm_client_id);
int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle);
int panthor_group_get_state(struct panthor_file *pfile,
struct drm_panthor_group_get_state *get_state);
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index b9fe926a49e8..6d567e5c7c6f 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -473,12 +473,15 @@ static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
return best_div;
}
-static long pl111_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int pl111_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int div = pl111_clk_div_choose_div(hw, rate, prate, true);
+ int div = pl111_clk_div_choose_div(hw, req->rate,
+ &req->best_parent_rate, true);
- return DIV_ROUND_UP_ULL(*prate, div);
+ req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);
+
+ return 0;
}
static unsigned long pl111_clk_div_recalc_rate(struct clk_hw *hw,
@@ -528,7 +531,7 @@ static int pl111_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops pl111_clk_div_ops = {
.recalc_rate = pl111_clk_div_recalc_rate,
- .round_rate = pl111_clk_div_round_rate,
+ .determine_rate = pl111_clk_div_determine_rate,
.set_rate = pl111_clk_div_set_rate,
};
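
The same conversion pattern recurs in the stm and sun4i hunks further below: .round_rate() returned the rounded rate (or a negative errno) and passed the parent rate by pointer, whereas .determine_rate() carries both through struct clk_rate_request. The general shape, with a hypothetical my_calc_div() helper standing in for the driver's divider math:

	static int foo_clk_determine_rate(struct clk_hw *hw,
					  struct clk_rate_request *req)
	{
		int div = my_calc_div(req->rate, req->best_parent_rate);

		/* Report the achievable rate back through the request. */
		req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);

		return 0;	/* a negative errno here rejects the request */
	}
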
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index fc5e3763c359..d26043424e95 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -39,7 +39,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
qxl_surface_evict(qdev, qobj, false);
tbo = &qobj->tbo;
- ttm_bo_put(tbo);
+ ttm_bo_fini(tbo);
}
int qxl_gem_object_create(struct qxl_device *qdev, int size,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f86773f3db20..18ca1bcfd2f9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -86,7 +86,7 @@ static void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_mn_unregister(robj);
- ttm_bo_put(&robj->tbo);
+ ttm_bo_fini(&robj->tbo);
}
}
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index 7f31d35780cc..553d45abd057 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -31,9 +31,8 @@
*
* @base: DRM scheduler base class
* @test: Backpointer to the owning kunit test case
- * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list
+ * @lock: Lock to protect the simulated @hw_timeline and @job_list
* @job_list: List of jobs submitted to the mock GPU
- * @done_list: List of jobs completed by the mock GPU
* @hw_timeline: Simulated hardware timeline has a @context, @next_seqno and
* @cur_seqno for implementing a struct dma_fence signaling the
* simulated job completion.
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index eec43d1a5595..8368f0ffbe1e 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -1498,7 +1498,7 @@ static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
- ssd130x_state->data_array = kmalloc(ssd130x->width * pages, GFP_KERNEL);
+ ssd130x_state->data_array = kmalloc_array(ssd130x->width, pages, GFP_KERNEL);
if (!ssd130x_state->data_array)
return -ENOMEM;
@@ -1519,7 +1519,7 @@ static int ssd132x_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
- ssd130x_state->data_array = kmalloc(columns * ssd130x->height, GFP_KERNEL);
+ ssd130x_state->data_array = kmalloc_array(columns, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->data_array)
return -ENOMEM;
@@ -1546,7 +1546,7 @@ static int ssd133x_crtc_atomic_check(struct drm_crtc *crtc,
pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
- ssd130x_state->data_array = kmalloc(pitch * ssd130x->height, GFP_KERNEL);
+ ssd130x_state->data_array = kmalloc_array(pitch, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->data_array)
return -ENOMEM;
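
kmalloc_array() is the overflow-checked form of the open-coded multiply being replaced; conceptually (simplified from the real helper in <linux/slab.h>):

	size_t bytes;

	if (check_mul_overflow(n, size, &bytes))
		return NULL;	/* n * size would overflow: refuse to allocate */

	return kmalloc(bytes, flags);
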
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 2c7bc064bc66..58eae6804cc8 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -274,8 +274,8 @@ static unsigned long dw_mipi_dsi_clk_recalc_rate(struct clk_hw *hw,
return (unsigned long)pll_out_khz * 1000;
}
-static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int dw_mipi_dsi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dw_mipi_dsi_stm *dsi = clk_to_dw_mipi_dsi_stm(hw);
unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz;
@@ -283,14 +283,14 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
DRM_DEBUG_DRIVER("\n");
- pll_in_khz = (unsigned int)(*parent_rate / 1000);
+ pll_in_khz = (unsigned int)(req->best_parent_rate / 1000);
/* Compute best pll parameters */
idf = 0;
ndiv = 0;
odf = 0;
- ret = dsi_pll_get_params(dsi, pll_in_khz, rate / 1000,
+ ret = dsi_pll_get_params(dsi, pll_in_khz, req->rate / 1000,
&idf, &ndiv, &odf);
if (ret)
DRM_WARN("Warning dsi_pll_get_params(): bad params\n");
@@ -298,7 +298,9 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
/* Get the adjusted pll out value */
pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf);
- return pll_out_khz * 1000;
+ req->rate = pll_out_khz * 1000;
+
+ return 0;
}
static int dw_mipi_dsi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -351,7 +353,7 @@ static const struct clk_ops dw_mipi_dsi_stm_clk_ops = {
.disable = dw_mipi_dsi_clk_disable,
.is_enabled = dw_mipi_dsi_clk_is_enabled,
.recalc_rate = dw_mipi_dsi_clk_recalc_rate,
- .round_rate = dw_mipi_dsi_clk_round_rate,
+ .determine_rate = dw_mipi_dsi_clk_determine_rate,
.set_rate = dw_mipi_dsi_clk_set_rate,
};
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 07788e8d3d83..fe38c0984b2b 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -682,8 +682,8 @@ static unsigned long lvds_pixel_clk_recalc_rate(struct clk_hw *hw,
return (unsigned long)lvds->pixel_clock_rate;
}
-static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int lvds_pixel_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_lvds *lvds = container_of(hw, struct stm_lvds, lvds_ck_px);
unsigned int pll_in_khz, bdiv = 0, mdiv = 0, ndiv = 0;
@@ -703,7 +703,7 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate,
mode = list_first_entry(&connector->modes,
struct drm_display_mode, head);
- pll_in_khz = (unsigned int)(*parent_rate / 1000);
+ pll_in_khz = (unsigned int)(req->best_parent_rate / 1000);
if (lvds_is_dual_link(lvds->link_type))
multiplier = 2;
@@ -719,14 +719,16 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate,
lvds->pixel_clock_rate = (unsigned long)pll_get_clkout_khz(pll_in_khz, bdiv, mdiv, ndiv)
* 1000 * multiplier / 7;
- return lvds->pixel_clock_rate;
+ req->rate = lvds->pixel_clock_rate;
+
+ return 0;
}
static const struct clk_ops lvds_pixel_clk_ops = {
.enable = lvds_pixel_clk_enable,
.disable = lvds_pixel_clk_disable,
.recalc_rate = lvds_pixel_clk_recalc_rate,
- .round_rate = lvds_pixel_clk_round_rate,
+ .determine_rate = lvds_pixel_clk_determine_rate,
};
static const struct clk_init_data clk_data = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 12430b9d4e93..b1beadb9bb59 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -59,13 +59,15 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
return best_rate;
}
-static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sun4i_ddc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sun4i_ddc *ddc = hw_to_ddc(hw);
- return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div,
- ddc->m_offset, NULL, NULL);
+ req->rate = sun4i_ddc_calc_divider(req->rate, req->best_parent_rate,
+ ddc->pre_div, ddc->m_offset, NULL, NULL);
+
+ return 0;
}
static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw,
@@ -101,7 +103,7 @@ static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops sun4i_ddc_ops = {
.recalc_rate = sun4i_ddc_recalc_rate,
- .round_rate = sun4i_ddc_round_rate,
+ .determine_rate = sun4i_ddc_determine_rate,
.set_rate = sun4i_ddc_set_rate,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
index 03d7de1911cd..4afb12bd5281 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
@@ -67,8 +67,8 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
return parent_rate / val;
}
-static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sun4i_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
struct sun4i_tcon *tcon = dclk->tcon;
@@ -77,7 +77,7 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
int i;
for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
- u64 ideal = (u64)rate * i;
+ u64 ideal = (u64)req->rate * i;
unsigned long rounded;
/*
@@ -99,17 +99,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
goto out;
}
- if (abs(rate - rounded / i) <
- abs(rate - best_parent / best_div)) {
+ if (abs(req->rate - rounded / i) <
+ abs(req->rate - best_parent / best_div)) {
best_parent = rounded;
best_div = i;
}
}
out:
- *parent_rate = best_parent;
+ req->best_parent_rate = best_parent;
- return best_parent / best_div;
+ req->rate = best_parent / best_div;
+
+ return 0;
}
static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -155,7 +157,7 @@ static const struct clk_ops sun4i_dclk_ops = {
.is_enabled = sun4i_dclk_is_enabled,
.recalc_rate = sun4i_dclk_recalc_rate,
- .round_rate = sun4i_dclk_round_rate,
+ .determine_rate = sun4i_dclk_determine_rate,
.set_rate = sun4i_dclk_set_rate,
.get_phase = sun4i_dclk_get_phase,
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 8cd2969e7d4b..c4820f5e7658 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -658,7 +658,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
{
const u8 *ptr = data;
unsigned long offset;
- size_t i, j;
+ size_t i;
u32 value;
switch (ptr[0]) {
@@ -691,7 +691,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
- for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ for (i = 3; i < size; i += 7) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
value = tegra_hdmi_subpack(&ptr[i], num);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 21f3dfdcc5c9..bc7dd562cf6b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -1864,7 +1864,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
{
const u8 *ptr = data;
unsigned long offset;
- size_t i, j;
+ size_t i;
u32 value;
switch (ptr[0]) {
@@ -1897,7 +1897,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
- for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ for (i = 3; i < size; i += 7) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
value = tegra_sor_hdmi_subpack(&ptr[i], num);
diff --git a/drivers/gpu/drm/tests/.kunitconfig b/drivers/gpu/drm/tests/.kunitconfig
index 6ec04b4c979d..5be8e71f45d5 100644
--- a/drivers/gpu/drm/tests/.kunitconfig
+++ b/drivers/gpu/drm/tests/.kunitconfig
@@ -1,3 +1,5 @@
CONFIG_KUNIT=y
CONFIG_DRM=y
+CONFIG_DRM_VKMS=y
+CONFIG_DRM_FBDEV_EMULATION=y
CONFIG_DRM_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index 6c77550c51af..5426b435f702 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -379,7 +379,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
dma_resv_fini(resv);
}
-static void ttm_bo_put_basic(struct kunit *test)
+static void ttm_bo_fini_basic(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_buffer_object *bo;
@@ -410,7 +410,7 @@ static void ttm_bo_put_basic(struct kunit *test)
dma_resv_unlock(bo->base.resv);
KUNIT_EXPECT_EQ(test, err, 0);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static const char *mock_name(struct dma_fence *f)
@@ -423,7 +423,7 @@ static const struct dma_fence_ops mock_fence_ops = {
.get_timeline_name = mock_name,
};
-static void ttm_bo_put_shared_resv(struct kunit *test)
+static void ttm_bo_fini_shared_resv(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_buffer_object *bo;
@@ -463,7 +463,7 @@ static void ttm_bo_put_shared_resv(struct kunit *test)
bo->type = ttm_bo_type_device;
bo->base.resv = external_resv;
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_pin_basic(struct kunit *test)
@@ -616,8 +616,8 @@ static struct kunit_case ttm_bo_test_cases[] = {
KUNIT_CASE(ttm_bo_unreserve_basic),
KUNIT_CASE(ttm_bo_unreserve_pinned),
KUNIT_CASE(ttm_bo_unreserve_bulk),
- KUNIT_CASE(ttm_bo_put_basic),
- KUNIT_CASE(ttm_bo_put_shared_resv),
+ KUNIT_CASE(ttm_bo_fini_basic),
+ KUNIT_CASE(ttm_bo_fini_shared_resv),
KUNIT_CASE(ttm_bo_pin_basic),
KUNIT_CASE(ttm_bo_pin_unpin_resource),
KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
index 1bcc67977f48..3a1eef83190c 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -144,7 +144,7 @@ static void ttm_bo_init_reserved_sys_man(struct kunit *test)
drm_mm_node_allocated(&bo->base.vma_node.vm_node));
ttm_resource_free(bo, &bo->resource);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_init_reserved_mock_man(struct kunit *test)
@@ -186,7 +186,7 @@ static void ttm_bo_init_reserved_mock_man(struct kunit *test)
drm_mm_node_allocated(&bo->base.vma_node.vm_node));
ttm_resource_free(bo, &bo->resource);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}
@@ -221,7 +221,7 @@ static void ttm_bo_init_reserved_resv(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv);
ttm_resource_free(bo, &bo->resource);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_validate_basic(struct kunit *test)
@@ -265,7 +265,7 @@ static void ttm_bo_validate_basic(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo->resource->placement,
DRM_BUDDY_TOPDOWN_ALLOCATION);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
@@ -292,7 +292,7 @@ static void ttm_bo_validate_invalid_placement(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, -ENOMEM);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_validate_failed_alloc(struct kunit *test)
@@ -321,7 +321,7 @@ static void ttm_bo_validate_failed_alloc(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, -ENOMEM);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_bad_manager_fini(priv->ttm_dev, mem_type);
}
@@ -353,7 +353,7 @@ static void ttm_bo_validate_pinned(struct kunit *test)
ttm_bo_unpin(bo);
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = {
@@ -403,7 +403,7 @@ static void ttm_bo_validate_same_placement(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
if (params->mem_type != TTM_PL_SYSTEM)
ttm_mock_manager_fini(priv->ttm_dev, params->mem_type);
@@ -452,7 +452,7 @@ static void ttm_bo_validate_busy_placement(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_bad_manager_fini(priv->ttm_dev, fst_mem);
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
@@ -495,7 +495,7 @@ static void ttm_bo_validate_multihop(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2);
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
ttm_mock_manager_fini(priv->ttm_dev, tmp_mem);
@@ -567,7 +567,7 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC);
}
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static int threaded_dma_resv_signal(void *arg)
@@ -635,7 +635,7 @@ static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
/* Make sure we have an idle object at this point */
dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
@@ -668,7 +668,7 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
dma_fence_put(man->move);
}
@@ -753,7 +753,7 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
else
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
@@ -807,8 +807,8 @@ static void ttm_bo_validate_happy_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type);
for (i = 0; i < bo_no; i++)
- ttm_bo_put(&bos[i]);
- ttm_bo_put(bo_val);
+ ttm_bo_fini(&bos[i]);
+ ttm_bo_fini(bo_val);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
@@ -852,12 +852,12 @@ static void ttm_bo_validate_all_pinned_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, -ENOMEM);
- ttm_bo_put(bo_small);
+ ttm_bo_fini(bo_small);
ttm_bo_reserve(bo_big, false, false, NULL);
ttm_bo_unpin(bo_big);
dma_resv_unlock(bo_big->base.resv);
- ttm_bo_put(bo_big);
+ ttm_bo_fini(bo_big);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
@@ -916,13 +916,13 @@ static void ttm_bo_validate_allowed_only_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict);
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE);
- ttm_bo_put(bo);
- ttm_bo_put(bo_evictable);
+ ttm_bo_fini(bo);
+ ttm_bo_fini(bo_evictable);
ttm_bo_reserve(bo_pinned, false, false, NULL);
ttm_bo_unpin(bo_pinned);
dma_resv_unlock(bo_pinned->base.resv);
- ttm_bo_put(bo_pinned);
+ ttm_bo_fini(bo_pinned);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
@@ -973,8 +973,8 @@ static void ttm_bo_validate_deleted_evict(struct kunit *test)
KUNIT_EXPECT_NULL(test, bo_big->ttm);
KUNIT_EXPECT_NULL(test, bo_big->resource);
- ttm_bo_put(bo_small);
- ttm_bo_put(bo_big);
+ ttm_bo_fini(bo_small);
+ ttm_bo_fini(bo_big);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}
@@ -1025,8 +1025,8 @@ static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type);
KUNIT_EXPECT_NULL(test, bo_val->resource);
- ttm_bo_put(bo_init);
- ttm_bo_put(bo_val);
+ ttm_bo_fini(bo_init);
+ ttm_bo_fini(bo_val);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict);
@@ -1070,8 +1070,8 @@ static void ttm_bo_validate_evict_gutting(struct kunit *test)
KUNIT_ASSERT_NULL(test, bo_evict->resource);
KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
- ttm_bo_put(bo_evict);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo_evict);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}
@@ -1128,9 +1128,9 @@ static void ttm_bo_validate_recrusive_evict(struct kunit *test)
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict);
- ttm_bo_put(bo_val);
- ttm_bo_put(bo_tt);
- ttm_bo_put(bo_mock);
+ ttm_bo_fini(bo_val);
+ ttm_bo_fini(bo_tt);
+ ttm_bo_fini(bo_mock);
}
static struct kunit_case ttm_bo_validate_test_cases[] = {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 29423ceeec5c..fba2a68a556e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -318,18 +318,17 @@ static void ttm_bo_release(struct kref *kref)
bo->destroy(bo);
}
-/**
- * ttm_bo_put
- *
- * @bo: The buffer object.
- *
- * Unreference a buffer object.
- */
+/* TODO: remove! */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
kref_put(&bo->kref, ttm_bo_release);
}
-EXPORT_SYMBOL(ttm_bo_put);
+
+void ttm_bo_fini(struct ttm_buffer_object *bo)
+{
+ ttm_bo_put(bo);
+}
+EXPORT_SYMBOL(ttm_bo_fini);
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h
index 9d8b747a34db..e0d48eac74b0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_internal.h
+++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h
@@ -55,4 +55,6 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
return bo;
}
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
#endif
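
With ttm_bo_put() demoted to this internal header, drivers mark the end of a BO's lifetime explicitly via ttm_bo_fini(), as the qxl/radeon/vmwgfx/xe hunks in this patch do. A sketch of the resulting GEM free hook shape (the foo_* names are placeholders):

	static void foo_gem_object_free(struct drm_gem_object *obj)
	{
		struct ttm_buffer_object *bo =
			container_of(obj, struct ttm_buffer_object, base);

		ttm_bo_fini(bo);	/* final unreference; frees via bo->destroy */
	}
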
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 123ab0ce1781..bb8c40be3250 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -35,6 +35,7 @@ config DRM_VC4_HDMI_CEC
bool "Broadcom VC4 HDMI CEC Support"
depends on DRM_VC4
select CEC_CORE
+ select DRM_DISPLAY_HDMI_CEC_HELPER
help
Choose this option if you have a Broadcom VC4 GPU
and want to use CEC.
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 07c91b450f93..049c92dd5d27 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -32,6 +32,7 @@
*/
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/display/drm_scdc_helper.h>
@@ -375,14 +376,6 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
drm_atomic_helper_connector_hdmi_hotplug(connector, status);
- if (status == connector_status_disconnected) {
- cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
- return;
- }
-
- cec_s_phys_addr(vc4_hdmi->cec_adap,
- connector->display_info.source_physical_address, false);
-
if (status != connector_status_connected)
return;
@@ -2384,8 +2377,8 @@ static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
struct vc4_hdmi *vc4_hdmi = priv;
if (vc4_hdmi->cec_rx_msg.len)
- cec_received_msg(vc4_hdmi->cec_adap,
- &vc4_hdmi->cec_rx_msg);
+ drm_connector_hdmi_cec_received_msg(&vc4_hdmi->connector,
+ &vc4_hdmi->cec_rx_msg);
return IRQ_HANDLED;
}
@@ -2395,15 +2388,17 @@ static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv)
struct vc4_hdmi *vc4_hdmi = priv;
if (vc4_hdmi->cec_tx_ok) {
- cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK,
- 0, 0, 0, 0);
+ drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector,
+ CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
} else {
/*
* This CEC implementation makes 1 retry, so if we
* get a NACK, then that means it made 2 attempts.
*/
- cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK,
- 0, 2, 0, 0);
+ drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector,
+ CEC_TX_STATUS_NACK,
+ 0, 2, 0, 0);
}
return IRQ_HANDLED;
}
@@ -2560,9 +2555,9 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
return ret;
}
-static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
+static int vc4_hdmi_cec_enable(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
@@ -2627,9 +2622,9 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
return 0;
}
-static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
+static int vc4_hdmi_cec_disable(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
@@ -2663,17 +2658,17 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
return 0;
}
-static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+static int vc4_hdmi_cec_adap_enable(struct drm_connector *connector, bool enable)
{
if (enable)
- return vc4_hdmi_cec_enable(adap);
+ return vc4_hdmi_cec_enable(connector);
else
- return vc4_hdmi_cec_disable(adap);
+ return vc4_hdmi_cec_disable(connector);
}
-static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+static int vc4_hdmi_cec_adap_log_addr(struct drm_connector *connector, u8 log_addr)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
@@ -2699,10 +2694,10 @@ static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
return 0;
}
-static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+static int vc4_hdmi_cec_adap_transmit(struct drm_connector *connector, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *dev = vc4_hdmi->connector.dev;
unsigned long flags;
u32 val;
@@ -2745,84 +2740,65 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
return 0;
}
-static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
- .adap_enable = vc4_hdmi_cec_adap_enable,
- .adap_log_addr = vc4_hdmi_cec_adap_log_addr,
- .adap_transmit = vc4_hdmi_cec_adap_transmit,
-};
-
-static void vc4_hdmi_cec_release(void *ptr)
-{
- struct vc4_hdmi *vc4_hdmi = ptr;
-
- cec_unregister_adapter(vc4_hdmi->cec_adap);
- vc4_hdmi->cec_adap = NULL;
-}
-
-static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+static int vc4_hdmi_cec_init(struct drm_connector *connector)
{
- struct cec_connector_info conn_info;
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
int ret;
- if (!of_property_present(dev->of_node, "interrupts")) {
- dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
- return 0;
- }
-
- vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
- vc4_hdmi,
- vc4_hdmi->variant->card_name,
- CEC_CAP_DEFAULTS |
- CEC_CAP_CONNECTOR_INFO, 1);
- ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
- if (ret < 0)
- return ret;
-
- cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
- cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
-
if (vc4_hdmi->variant->external_irq_controller) {
ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"),
vc4_cec_irq_handler_rx_bare,
vc4_cec_irq_handler_rx_thread, 0,
"vc4 hdmi cec rx", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ return ret;
ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"),
vc4_cec_irq_handler_tx_bare,
vc4_cec_irq_handler_tx_thread, 0,
"vc4 hdmi cec tx", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ return ret;
} else {
ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
vc4_cec_irq_handler,
vc4_cec_irq_handler_thread, 0,
"vc4 hdmi cec", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ return ret;
}
- ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
- if (ret < 0)
- goto err_delete_cec_adap;
+ return 0;
+}
+
+static const struct drm_connector_hdmi_cec_funcs vc4_hdmi_cec_funcs = {
+ .init = vc4_hdmi_cec_init,
+ .enable = vc4_hdmi_cec_adap_enable,
+ .log_addr = vc4_hdmi_cec_adap_log_addr,
+ .transmit = vc4_hdmi_cec_adap_transmit,
+};
+
+static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi)
+{
+ struct platform_device *pdev = vc4_hdmi->pdev;
+ struct device *dev = &pdev->dev;
+
+ if (!of_property_present(dev->of_node, "interrupts")) {
+ dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
+ return 0;
+ }
/*
- * NOTE: Strictly speaking, we should probably use a DRM-managed
- * registration there to avoid removing the CEC adapter by the
- * time the DRM driver doesn't have any user anymore.
+ * NOTE: the CEC adapter will be unregistered by drmm cleanup from
+ * drm_managed_release(), which is called from drm_dev_release()
+ * during device unbind.
*
* However, the CEC framework already cleans up the CEC adapter
* only when the last user has closed its file descriptor, so we
* don't need to handle it in DRM.
*
- * By the time the device-managed hook is executed, we will give
- * up our reference to the CEC adapter and therefore don't
- * really care when it's actually freed.
- *
* There's still a problematic sequence: if we unregister our
* CEC adapter, but the userspace keeps a handle on the CEC
* adapter but not the DRM device for some reason. In such a
@@ -2833,19 +2809,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
* the CEC framework already handles this too, by calling
* cec_is_registered() in cec_ioctl() and cec_poll().
*/
- ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi);
- if (ret)
- return ret;
-
- return 0;
-
-err_delete_cec_adap:
- cec_delete_adapter(vc4_hdmi->cec_adap);
-
- return ret;
+ return drmm_connector_hdmi_cec_register(&vc4_hdmi->connector,
+ &vc4_hdmi_cec_funcs,
+ vc4_hdmi->variant->card_name,
+ 1,
+ &pdev->dev);
}
#else
-static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi)
{
return 0;
}
@@ -3250,7 +3221,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_put_runtime_pm;
- ret = vc4_hdmi_cec_init(vc4_hdmi);
+ ret = vc4_hdmi_cec_register(vc4_hdmi);
if (ret)
goto err_put_runtime_pm;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index a31157c99bee..8d069718df00 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -147,7 +147,6 @@ struct vc4_hdmi {
*/
bool disable_wifi_frequencies;
- struct cec_adapter *cec_adap;
struct cec_msg cec_rx_msg;
bool cec_tx_ok;
bool cec_irq_was_rx;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index eedf1fe60be7..39f8c46550c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -37,7 +37,7 @@ static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
if (bo)
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static int vmw_gem_object_open(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 8422f3cab113..edc6e29f0cc1 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1708,7 +1708,7 @@ static void xe_gem_object_free(struct drm_gem_object *obj)
* refcount directly if needed.
*/
__xe_bo_vunmap(gem_to_xe_bo(obj));
- ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
+ ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base));
}
static void xe_gem_object_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 344cc9e741c1..723a80895cd4 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -471,6 +471,18 @@ static int host1x_device_add(struct host1x *host1x,
mutex_unlock(&clients_lock);
+ /*
+ * Add device even if there are no subdevs to ensure syncpoint functionality
+ * is available regardless of whether any engine subdevices are present
+ */
+ if (list_empty(&device->subdevs)) {
+ err = device_add(&device->dev);
+ if (err < 0)
+ dev_err(&device->dev, "failed to add device: %d\n", err);
+ else
+ device->registered = true;
+ }
+
return 0;
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 1f93e5e276c0..e365df6af353 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -585,14 +585,8 @@ static int host1x_probe(struct platform_device *pdev)
}
host->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(host->clk)) {
- err = PTR_ERR(host->clk);
-
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get clock: %d\n", err);
-
- return err;
- }
+ if (IS_ERR(host->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(host->clk), "failed to get clock\n");
err = host1x_get_resets(host);
if (err)
@@ -821,6 +815,7 @@ u64 host1x_get_dma_mask(struct host1x *host1x)
}
EXPORT_SYMBOL(host1x_get_dma_mask);
+MODULE_SOFTDEP("post: tegra-drm");
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index d44b8de890be..2df6a16d484e 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -47,24 +47,11 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
}
}
-static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
- u32 next_class)
+static void submit_wait(struct host1x_job *job, u32 id, u32 threshold)
{
struct host1x_cdma *cdma = &job->channel->cdma;
-#if HOST1X_HW >= 6
- u32 stream_id;
-
- /*
- * If a memory context has been set, use it. Otherwise
- * (if context isolation is disabled) use the engine's
- * firmware stream ID.
- */
- if (job->memory_context)
- stream_id = job->memory_context->stream_id;
- else
- stream_id = job->engine_fallback_streamid;
-
+#if HOST1X_HW >= 2
host1x_cdma_push_wide(cdma,
host1x_opcode_setclass(
HOST1X_CLASS_HOST1X,
@@ -76,23 +63,6 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
id,
HOST1X_OPCODE_NOP
);
- host1x_cdma_push_wide(&job->channel->cdma,
- host1x_opcode_setclass(job->class, 0, 0),
- host1x_opcode_setpayload(stream_id),
- host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
- HOST1X_OPCODE_NOP);
-#elif HOST1X_HW >= 2
- host1x_cdma_push_wide(cdma,
- host1x_opcode_setclass(
- HOST1X_CLASS_HOST1X,
- HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32,
- /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */
- BIT(0) | BIT(2)
- ),
- threshold,
- id,
- host1x_opcode_setclass(next_class, 0, 0)
- );
#else
/* TODO add waitchk or use waitbases or other mitigation */
host1x_cdma_push(cdma,
@@ -103,6 +73,32 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
),
host1x_class_host_wait_syncpt(id, threshold)
);
+#endif
+}
+
+static void submit_setclass(struct host1x_job *job, u32 next_class)
+{
+ struct host1x_cdma *cdma = &job->channel->cdma;
+
+#if HOST1X_HW >= 6
+ u32 stream_id;
+
+ /*
+ * If a memory context has been set, use it. Otherwise
+ * (if context isolation is disabled) use the engine's
+ * firmware stream ID.
+ */
+ if (job->memory_context)
+ stream_id = job->memory_context->stream_id;
+ else
+ stream_id = job->engine_fallback_streamid;
+
+ host1x_cdma_push_wide(cdma,
+ host1x_opcode_setclass(next_class, 0, 0),
+ host1x_opcode_setpayload(stream_id),
+ host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
+ HOST1X_OPCODE_NOP);
+#else
host1x_cdma_push(cdma,
host1x_opcode_setclass(next_class, 0, 0),
HOST1X_OPCODE_NOP
@@ -110,7 +106,8 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
#endif
}
-static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
+static void submit_gathers(struct host1x_job *job, struct host1x_job_cmd *cmds, u32 num_cmds,
+ u32 job_syncpt_base)
{
struct host1x_cdma *cdma = &job->channel->cdma;
#if HOST1X_HW < 6
@@ -119,8 +116,8 @@ static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
unsigned int i;
u32 threshold;
- for (i = 0; i < job->num_cmds; i++) {
- struct host1x_job_cmd *cmd = &job->cmds[i];
+ for (i = 0; i < num_cmds; i++) {
+ struct host1x_job_cmd *cmd = &cmds[i];
if (cmd->is_wait) {
if (cmd->wait.relative)
@@ -128,7 +125,8 @@ static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
else
threshold = cmd->wait.threshold;
- submit_wait(job, cmd->wait.id, threshold, cmd->wait.next_class);
+ submit_wait(job, cmd->wait.id, threshold);
+ submit_setclass(job, cmd->wait.next_class);
} else {
struct host1x_job_gather *g = &cmd->gather;
@@ -216,7 +214,34 @@ static void channel_program_cdma(struct host1x_job *job)
#if HOST1X_HW >= 6
u32 fence;
+ int i = 0;
+
+ if (job->num_cmds == 0)
+ goto prefences_done;
+ if (!job->cmds[0].is_wait || job->cmds[0].wait.relative)
+ goto prefences_done;
+
+ /* Enter host1x class with invalid stream ID for prefence waits. */
+ host1x_cdma_push_wide(cdma,
+ host1x_opcode_acquire_mlock(1),
+ host1x_opcode_setclass(1, 0, 0),
+ host1x_opcode_setpayload(0),
+ host1x_opcode_setstreamid(0x1fffff));
+
+ for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_job_cmd *cmd = &job->cmds[i];
+
+ if (!cmd->is_wait || cmd->wait.relative)
+ break;
+
+ submit_wait(job, cmd->wait.id, cmd->wait.threshold);
+ }
+
+ host1x_cdma_push(cdma,
+ HOST1X_OPCODE_NOP,
+ host1x_opcode_release_mlock(1));
+prefences_done:
/* Enter engine class with invalid stream ID. */
host1x_cdma_push_wide(cdma,
host1x_opcode_acquire_mlock(job->class),
@@ -230,11 +255,12 @@ static void channel_program_cdma(struct host1x_job *job)
host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1),
HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) |
HOST1X_UCLASS_INCR_SYNCPT_COND_F(4));
- submit_wait(job, job->syncpt->id, fence, job->class);
+ submit_wait(job, job->syncpt->id, fence);
+ submit_setclass(job, job->class);
/* Submit work. */
job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs);
- submit_gathers(job, job->syncpt_end - job->syncpt_incrs);
+ submit_gathers(job, job->cmds + i, job->num_cmds - i, job->syncpt_end - job->syncpt_incrs);
/* Before releasing MLOCK, ensure engine is idle again. */
fence = host1x_syncpt_incr_max(sp, 1);
@@ -242,7 +268,7 @@ static void channel_program_cdma(struct host1x_job *job)
host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1),
HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) |
HOST1X_UCLASS_INCR_SYNCPT_COND_F(4));
- submit_wait(job, job->syncpt->id, fence, job->class);
+ submit_wait(job, job->syncpt->id, fence);
/* Release MLOCK. */
host1x_cdma_push(cdma,
@@ -272,7 +298,7 @@ static void channel_program_cdma(struct host1x_job *job)
job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs);
- submit_gathers(job, job->syncpt_end - job->syncpt_incrs);
+ submit_gathers(job, job->cmds, job->num_cmds, job->syncpt_end - job->syncpt_incrs);
#endif
}
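Splitting submit_setclass() out of submit_wait() lets channel_program_cdma() on HOST1X_HW >= 6 hoist a job's leading absolute prefence waits out of the engine's MLOCK region. The resulting command stream looks roughly like this (ordering sketch, not literal opcodes):

	acquire_mlock(1)              /* host1x class, invalid stream ID */
	wait_syncpt(id, threshold)    /* one per leading absolute prefence */
	release_mlock(1)
	acquire_mlock(job->class)     /* engine class, invalid stream ID */
	incr_syncpt + wait            /* engine-idle check */
	gathers for cmds[i..num_cmds) /* hoisted prefences skipped via +i */
	incr_syncpt + wait            /* idle again before unlock */
	release_mlock(job->class)

The apparent intent is that potentially long prefence waits no longer pin the engine's MLOCK, so other channels can use the engine until this job's prerequisites signal; submit_gathers() receives job->cmds + i and job->num_cmds - i so the consumed waits are not replayed.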
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index f63d14a57a1d..acc7d82e0585 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -345,8 +345,6 @@ static void syncpt_release(struct kref *ref)
sp->locked = false;
- mutex_lock(&sp->host->syncpt_mutex);
-
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
@@ -369,7 +367,7 @@ void host1x_syncpt_put(struct host1x_syncpt *sp)
if (!sp)
return;
- kref_put(&sp->ref, syncpt_release);
+ kref_put_mutex(&sp->ref, syncpt_release, &sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_put);
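The kref_put_mutex() change closes a put/lookup race: with the mutex_lock() inside syncpt_release(), the refcount could reach zero before the mutex was taken, leaving a window in which a lookup running under syncpt_mutex could still find the syncpoint and kref_get() an object already headed for teardown. kref_put_mutex() only performs the final decrement with the mutex held, and the mutex_unlock() at the end of syncpt_release() (outside this hunk) now pairs with it. The general shape of the pattern, as a simplified sketch with hypothetical obj/table names:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(table_lock);

struct obj {
	struct kref ref;
	struct list_head node;	/* lookups walk this under table_lock */
};

static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, ref);

	/* Called with table_lock held by kref_put_mutex(). */
	list_del(&o->node);
	mutex_unlock(&table_lock);
	kfree(o);
}

void obj_put(struct obj *o)
{
	/* Takes table_lock only when this put drops the last reference. */
	kref_put_mutex(&o->ref, obj_release, &table_lock);
}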
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index f9475c14f733..08cfcd81c6b4 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -236,10 +236,10 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = DIV_ROUND_UP(vc->vc_font.width, 8), c;
- int y = real_y(ops->p, vc->state.y);
+ int y = real_y(par->p, vc->state.y);
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1;
char *src;
@@ -253,10 +253,10 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
attribute = get_attribute(info, c);
src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
- if (ops->cursor_state.image.data != src ||
- ops->cursor_reset) {
- ops->cursor_state.image.data = src;
- cursor.set |= FB_CUR_SETIMAGE;
+ if (par->cursor_state.image.data != src ||
+ par->cursor_reset) {
+ par->cursor_state.image.data = src;
+ cursor.set |= FB_CUR_SETIMAGE;
}
if (attribute) {
@@ -265,46 +265,46 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
if (!dst)
return;
- kfree(ops->cursor_data);
- ops->cursor_data = dst;
+ kfree(par->cursor_data);
+ par->cursor_data = dst;
update_attr(dst, src, attribute, vc);
src = dst;
}
- if (ops->cursor_state.image.fg_color != fg ||
- ops->cursor_state.image.bg_color != bg ||
- ops->cursor_reset) {
- ops->cursor_state.image.fg_color = fg;
- ops->cursor_state.image.bg_color = bg;
+ if (par->cursor_state.image.fg_color != fg ||
+ par->cursor_state.image.bg_color != bg ||
+ par->cursor_reset) {
+ par->cursor_state.image.fg_color = fg;
+ par->cursor_state.image.bg_color = bg;
cursor.set |= FB_CUR_SETCMAP;
}
- if ((ops->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) ||
- (ops->cursor_state.image.dy != (vc->vc_font.height * y)) ||
- ops->cursor_reset) {
- ops->cursor_state.image.dx = vc->vc_font.width * vc->state.x;
- ops->cursor_state.image.dy = vc->vc_font.height * y;
+ if ((par->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) ||
+ (par->cursor_state.image.dy != (vc->vc_font.height * y)) ||
+ par->cursor_reset) {
+ par->cursor_state.image.dx = vc->vc_font.width * vc->state.x;
+ par->cursor_state.image.dy = vc->vc_font.height * y;
cursor.set |= FB_CUR_SETPOS;
}
- if (ops->cursor_state.image.height != vc->vc_font.height ||
- ops->cursor_state.image.width != vc->vc_font.width ||
- ops->cursor_reset) {
- ops->cursor_state.image.height = vc->vc_font.height;
- ops->cursor_state.image.width = vc->vc_font.width;
+ if (par->cursor_state.image.height != vc->vc_font.height ||
+ par->cursor_state.image.width != vc->vc_font.width ||
+ par->cursor_reset) {
+ par->cursor_state.image.height = vc->vc_font.height;
+ par->cursor_state.image.width = vc->vc_font.width;
cursor.set |= FB_CUR_SETSIZE;
}
- if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
- ops->cursor_reset) {
- ops->cursor_state.hot.x = cursor.hot.y = 0;
+ if (par->cursor_state.hot.x || par->cursor_state.hot.y ||
+ par->cursor_reset) {
+ par->cursor_state.hot.x = cursor.hot.y = 0;
cursor.set |= FB_CUR_SETHOT;
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != ops->p->cursor_shape ||
- ops->cursor_state.mask == NULL ||
- ops->cursor_reset) {
+ vc->vc_cursor_type != par->p->cursor_shape ||
+ par->cursor_state.mask == NULL ||
+ par->cursor_reset) {
char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
int cur_height, size, i = 0;
u8 msk = 0xff;
@@ -312,13 +312,13 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (!mask)
return;
- kfree(ops->cursor_state.mask);
- ops->cursor_state.mask = mask;
+ kfree(par->cursor_state.mask);
+ par->cursor_state.mask = mask;
- ops->p->cursor_shape = vc->vc_cursor_type;
+ par->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (CUR_SIZE(ops->p->cursor_shape)) {
+ switch (CUR_SIZE(par->p->cursor_shape)) {
case CUR_NONE:
cur_height = 0;
break;
@@ -347,19 +347,19 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
mask[i++] = msk;
}
- ops->cursor_state.enable = enable && !use_sw;
+ par->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
- cursor.image.fg_color = ops->cursor_state.image.fg_color;
- cursor.image.bg_color = ops->cursor_state.image.bg_color;
- cursor.image.dx = ops->cursor_state.image.dx;
- cursor.image.dy = ops->cursor_state.image.dy;
- cursor.image.height = ops->cursor_state.image.height;
- cursor.image.width = ops->cursor_state.image.width;
- cursor.hot.x = ops->cursor_state.hot.x;
- cursor.hot.y = ops->cursor_state.hot.y;
- cursor.mask = ops->cursor_state.mask;
- cursor.enable = ops->cursor_state.enable;
+ cursor.image.fg_color = par->cursor_state.image.fg_color;
+ cursor.image.bg_color = par->cursor_state.image.bg_color;
+ cursor.image.dx = par->cursor_state.image.dx;
+ cursor.image.dy = par->cursor_state.image.dy;
+ cursor.image.height = par->cursor_state.image.height;
+ cursor.image.width = par->cursor_state.image.width;
+ cursor.hot.x = par->cursor_state.hot.x;
+ cursor.hot.y = par->cursor_state.hot.y;
+ cursor.mask = par->cursor_state.mask;
+ cursor.enable = par->cursor_state.enable;
cursor.image.depth = 1;
cursor.rop = ROP_XOR;
@@ -369,31 +369,31 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (err)
soft_cursor(info, &cursor);
- ops->cursor_reset = 0;
+ par->cursor_reset = 0;
}
static int bit_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int err;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
-void fbcon_set_bitops(struct fbcon_ops *ops)
+static const struct fbcon_bitops bit_fbcon_bitops = {
+ .bmove = bit_bmove,
+ .clear = bit_clear,
+ .putcs = bit_putcs,
+ .clear_margins = bit_clear_margins,
+ .cursor = bit_cursor,
+ .update_start = bit_update_start,
+};
+
+void fbcon_set_bitops_ur(struct fbcon_par *par)
{
- ops->bmove = bit_bmove;
- ops->clear = bit_clear;
- ops->putcs = bit_putcs;
- ops->clear_margins = bit_clear_margins;
- ops->cursor = bit_cursor;
- ops->update_start = bit_update_start;
- ops->rotate_font = NULL;
-
- if (ops->rotate)
- fbcon_set_rotate(ops);
+ par->bitops = &bit_fbcon_bitops;
}
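Replacing the per-instance callback assignments with one static const table is the usual kernel ops-structure pattern: every console shares a single dispatch table that the compiler places in .rodata, instead of each instance carrying its own writable function pointers. A reduced sketch of the pattern with hypothetical names:

struct widget;

struct widget_ops {
	void (*draw)(const struct widget *w);
};

struct widget {
	const struct widget_ops *ops;	/* one const pointer replaces N fields */
};

static void plain_draw(const struct widget *w)
{
	(void)w;			/* no-op body, sketch only */
}

/* Shared, read-only dispatch table. */
static const struct widget_ops plain_ops = {
	.draw = plain_draw,
};

static void widget_init(struct widget *w)
{
	w->ops = &plain_ops;		/* single assignment, not per-field setup */
}

Besides shrinking each instance, a const table cannot be overwritten at run time, which is presumably part of the motivation for fbcon_set_bitops_ur() installing a pointer rather than filling in fields.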
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index a507d05f8fea..5fade44931b8 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -81,6 +81,7 @@
#include <asm/irq.h>
#include "fbcon.h"
+#include "fbcon_rotate.h"
#include "fb_internal.h"
/*
@@ -198,27 +199,27 @@ static struct device *fbcon_device;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION
static inline void fbcon_set_rotation(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (!(info->flags & FBINFO_MISC_TILEBLITTING) &&
- ops->p->con_rotate < 4)
- ops->rotate = ops->p->con_rotate;
+ par->p->con_rotate < 4)
+ par->rotate = par->p->con_rotate;
else
- ops->rotate = 0;
+ par->rotate = 0;
}
static void fbcon_rotate(struct fb_info *info, u32 rotate)
{
- struct fbcon_ops *ops= info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_info *fb_info;
- if (!ops || ops->currcon == -1)
+ if (!par || par->currcon == -1)
return;
- fb_info = fbcon_info_from_console(ops->currcon);
+ fb_info = fbcon_info_from_console(par->currcon);
if (info == fb_info) {
- struct fbcon_display *p = &fb_display[ops->currcon];
+ struct fbcon_display *p = &fb_display[par->currcon];
if (rotate < 4)
p->con_rotate = rotate;
@@ -231,12 +232,12 @@ static void fbcon_rotate(struct fb_info *info, u32 rotate)
static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
struct fbcon_display *p;
int i;
- if (!ops || ops->currcon < 0 || rotate > 3)
+ if (!par || par->currcon < 0 || rotate > 3)
return;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -254,9 +255,9 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
#else
static inline void fbcon_set_rotation(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->rotate = FB_ROTATE_UR;
+ par->rotate = FB_ROTATE_UR;
}
static void fbcon_rotate(struct fb_info *info, u32 rotate)
@@ -270,11 +271,31 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
}
#endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */
+static void fbcon_set_bitops(struct fbcon_par *par)
+{
+ switch (par->rotate) {
+ default:
+ fallthrough;
+ case FB_ROTATE_UR:
+ fbcon_set_bitops_ur(par);
+ break;
+ case FB_ROTATE_CW:
+ fbcon_set_bitops_cw(par);
+ break;
+ case FB_ROTATE_UD:
+ fbcon_set_bitops_ud(par);
+ break;
+ case FB_ROTATE_CCW:
+ fbcon_set_bitops_ccw(par);
+ break;
+ }
+}
+
static int fbcon_get_rotate(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- return (ops) ? ops->rotate : 0;
+ return (par) ? par->rotate : 0;
}
static bool fbcon_skip_panic(struct fb_info *info)
@@ -291,10 +312,10 @@ static bool fbcon_skip_panic(struct fb_info *info)
static inline bool fbcon_is_active(struct vc_data *vc, struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
return info->state == FBINFO_STATE_RUNNING &&
- vc->vc_mode == KD_TEXT && !ops->graphics && !fbcon_skip_panic(info);
+ vc->vc_mode == KD_TEXT && !par->graphics && !fbcon_skip_panic(info);
}
static int get_color(struct vc_data *vc, struct fb_info *info,
@@ -376,7 +397,7 @@ static int get_bg_color(struct vc_data *vc, struct fb_info *info, u16 c)
static void fb_flashcursor(struct work_struct *work)
{
- struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work);
+ struct fbcon_par *par = container_of(work, struct fbcon_par, cursor_work.work);
struct fb_info *info;
struct vc_data *vc = NULL;
int c;
@@ -391,10 +412,10 @@ static void fb_flashcursor(struct work_struct *work)
return;
/* protected by console_lock */
- info = ops->info;
+ info = par->info;
- if (ops->currcon != -1)
- vc = vc_cons[ops->currcon].d;
+ if (par->currcon != -1)
+ vc = vc_cons[par->currcon].d;
if (!vc || !con_is_visible(vc) ||
fbcon_info_from_console(vc->vc_num) != info ||
@@ -404,30 +425,30 @@ static void fb_flashcursor(struct work_struct *work)
}
c = scr_readw((u16 *) vc->vc_pos);
- enable = ops->cursor_flash && !ops->cursor_state.enable;
- ops->cursor(vc, info, enable,
- get_fg_color(vc, info, c),
- get_bg_color(vc, info, c));
+ enable = par->cursor_flash && !par->cursor_state.enable;
+ par->bitops->cursor(vc, info, enable,
+ get_fg_color(vc, info, c),
+ get_bg_color(vc, info, c));
console_unlock();
- queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
- ops->cur_blink_jiffies);
+ queue_delayed_work(system_power_efficient_wq, &par->cursor_work,
+ par->cur_blink_jiffies);
}
static void fbcon_add_cursor_work(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (fbcon_cursor_blink)
- queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
- ops->cur_blink_jiffies);
+ queue_delayed_work(system_power_efficient_wq, &par->cursor_work,
+ par->cur_blink_jiffies);
}
static void fbcon_del_cursor_work(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- cancel_delayed_work_sync(&ops->cursor_work);
+ cancel_delayed_work_sync(&par->cursor_work);
}
#ifndef MODULE
@@ -587,7 +608,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
int cols, int rows, int new_cols, int new_rows)
{
/* Need to make room for the logo */
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int cnt, erase = vc->vc_video_erase_char, step;
unsigned short *save = NULL, *r, *q;
int logo_height;
@@ -603,7 +624,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
*/
if (fb_get_color_depth(&info->var, &info->fix) == 1)
erase &= ~0x400;
- logo_height = fb_prepare_logo(info, ops->rotate);
+ logo_height = fb_prepare_logo(info, par->rotate);
logo_lines = DIV_ROUND_UP(logo_height, vc->vc_font.height);
q = (unsigned short *) (vc->vc_origin +
vc->vc_size_row * rows);
@@ -675,15 +696,15 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
#ifdef CONFIG_FB_TILEBLITTING
static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->p = &fb_display[vc->vc_num];
+ par->p = &fb_display[vc->vc_num];
if ((info->flags & FBINFO_MISC_TILEBLITTING))
fbcon_set_tileops(vc, info);
else {
fbcon_set_rotation(info);
- fbcon_set_bitops(ops);
+ fbcon_set_bitops(par);
}
}
@@ -700,12 +721,12 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
#else
static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
info->flags &= ~FBINFO_MISC_TILEBLITTING;
- ops->p = &fb_display[vc->vc_num];
+ par->p = &fb_display[vc->vc_num];
fbcon_set_rotation(info);
- fbcon_set_bitops(ops);
+ fbcon_set_bitops(par);
}
static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
@@ -725,13 +746,13 @@ static void fbcon_release(struct fb_info *info)
module_put(info->fbops->owner);
if (info->fbcon_par) {
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
fbcon_del_cursor_work(info);
- kfree(ops->cursor_state.mask);
- kfree(ops->cursor_data);
- kfree(ops->cursor_src);
- kfree(ops->fontbuffer);
+ kfree(par->cursor_state.mask);
+ kfree(par->cursor_data);
+ kfree(par->cursor_src);
+ kfree(par->fontbuffer);
kfree(info->fbcon_par);
info->fbcon_par = NULL;
}
@@ -739,7 +760,7 @@ static void fbcon_release(struct fb_info *info)
static int fbcon_open(struct fb_info *info)
{
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
if (!try_module_get(info->fbops->owner))
return -ENODEV;
@@ -753,16 +774,16 @@ static int fbcon_open(struct fb_info *info)
}
unlock_fb_info(info);
- ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
- if (!ops) {
+ par = kzalloc(sizeof(*par), GFP_KERNEL);
+ if (!par) {
fbcon_release(info);
return -ENOMEM;
}
- INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor);
- ops->info = info;
- info->fbcon_par = ops;
- ops->cur_blink_jiffies = HZ / 5;
+ INIT_DELAYED_WORK(&par->cursor_work, fb_flashcursor);
+ par->info = info;
+ info->fbcon_par = par;
+ par->cur_blink_jiffies = HZ / 5;
return 0;
}
@@ -809,12 +830,12 @@ static void con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
int unit, int show_logo)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int ret;
- ops->currcon = fg_console;
+ par->currcon = fg_console;
- if (info->fbops->fb_set_par && !ops->initialized) {
+ if (info->fbops->fb_set_par && !par->initialized) {
ret = info->fbops->fb_set_par(info);
if (ret)
@@ -823,8 +844,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
"error code %d\n", ret);
}
- ops->initialized = true;
- ops->graphics = 0;
+ par->initialized = true;
+ par->graphics = 0;
fbcon_set_disp(info, &info->var, unit);
if (show_logo) {
@@ -961,7 +982,7 @@ static const char *fbcon_startup(void)
struct vc_data *vc = vc_cons[fg_console].d;
const struct font_desc *font = NULL;
struct fb_info *info = NULL;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
int rows, cols;
/*
@@ -981,10 +1002,10 @@ static const char *fbcon_startup(void)
if (fbcon_open(info))
return NULL;
- ops = info->fbcon_par;
- ops->currcon = -1;
- ops->graphics = 1;
- ops->cur_rotate = -1;
+ par = info->fbcon_par;
+ par->currcon = -1;
+ par->graphics = 1;
+ par->cur_rotate = -1;
p->con_rotate = initial_rotation;
if (p->con_rotate == -1)
@@ -1007,8 +1028,8 @@ static const char *fbcon_startup(void)
vc->vc_font.charcount = font->charcount;
}
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= vc->vc_font.width;
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
@@ -1026,7 +1047,7 @@ static const char *fbcon_startup(void)
static void fbcon_init(struct vc_data *vc, bool init)
{
struct fb_info *info;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
struct vc_data **default_mode = vc->vc_display_fg;
struct vc_data *svc = *default_mode;
struct fbcon_display *t, *p = &fb_display[vc->vc_num];
@@ -1100,8 +1121,8 @@ static void fbcon_init(struct vc_data *vc, bool init)
if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
- ops = info->fbcon_par;
- ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+ par = info->fbcon_par;
+ par->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
p->con_rotate = initial_rotation;
if (p->con_rotate == -1)
@@ -1113,8 +1134,8 @@ static void fbcon_init(struct vc_data *vc, bool init)
cols = vc->vc_cols;
rows = vc->vc_rows;
- new_cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ new_cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ new_rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
new_cols /= vc->vc_font.width;
new_rows /= vc->vc_font.height;
@@ -1126,7 +1147,7 @@ static void fbcon_init(struct vc_data *vc, bool init)
* We need to do it in fbcon_init() to prevent screen corruption.
*/
if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
- if (info->fbops->fb_set_par && !ops->initialized) {
+ if (info->fbops->fb_set_par && !par->initialized) {
ret = info->fbops->fb_set_par(info);
if (ret)
@@ -1135,10 +1156,10 @@ static void fbcon_init(struct vc_data *vc, bool init)
"error code %d\n", ret);
}
- ops->initialized = true;
+ par->initialized = true;
}
- ops->graphics = 0;
+ par->graphics = 0;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
if ((info->flags & FBINFO_HWACCEL_COPYAREA) &&
@@ -1162,12 +1183,12 @@ static void fbcon_init(struct vc_data *vc, bool init)
if (logo)
fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
- if (ops->rotate_font && ops->rotate_font(info, vc)) {
- ops->rotate = FB_ROTATE_UR;
+ if (par->bitops->rotate_font && par->bitops->rotate_font(info, vc)) {
+ par->rotate = FB_ROTATE_UR;
set_blitting_type(vc, info);
}
- ops->p = &fb_display[fg_console];
+ par->p = &fb_display[fg_console];
}
static void fbcon_free_font(struct fbcon_display *p)
@@ -1205,7 +1226,7 @@ static void fbcon_deinit(struct vc_data *vc)
{
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_info *info;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
int idx;
fbcon_free_font(p);
@@ -1219,15 +1240,15 @@ static void fbcon_deinit(struct vc_data *vc)
if (!info)
goto finished;
- ops = info->fbcon_par;
+ par = info->fbcon_par;
- if (!ops)
+ if (!par)
goto finished;
if (con_is_visible(vc))
fbcon_del_cursor_work(info);
- ops->initialized = false;
+ par->initialized = false;
finished:
fbcon_free_font(p);
@@ -1274,7 +1295,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
unsigned int height, unsigned int width)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int fg, bg;
struct fbcon_display *p = &fb_display[vc->vc_num];
u_int y_break;
@@ -1289,7 +1310,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
vc->vc_top = 0;
/*
* If the font dimensions are not an integral of the display
- * dimensions then the ops->clear below won't end up clearing
+ * dimensions then the par->bitops->clear below won't end up clearing
* the margins. Call clear_margins here in case the logo
* bitmap stretched into the margin area.
*/
@@ -1303,11 +1324,11 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
y_break = p->vrows - p->yscroll;
if (sy < y_break && sy + height - 1 >= y_break) {
u_int b = y_break - sy;
- ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg);
- ops->clear(vc, info, real_y(p, sy + b), sx, height - b,
- width, fg, bg);
+ par->bitops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg);
+ par->bitops->clear(vc, info, real_y(p, sy + b), sx, height - b,
+ width, fg, bg);
} else
- ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg);
+ par->bitops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg);
}
static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
@@ -1321,30 +1342,30 @@ static void fbcon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (fbcon_is_active(vc, info))
- ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
- get_fg_color(vc, info, scr_readw(s)),
- get_bg_color(vc, info, scr_readw(s)));
+ par->bitops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+ get_fg_color(vc, info, scr_readw(s)),
+ get_bg_color(vc, info, scr_readw(s)));
}
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (fbcon_is_active(vc, info))
- ops->clear_margins(vc, info, margin_color, bottom_only);
+ par->bitops->clear_margins(vc, info, margin_color, bottom_only);
}
static void fbcon_cursor(struct vc_data *vc, bool enable)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int c = scr_readw((u16 *) vc->vc_pos);
- ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+ par->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
if (!fbcon_is_active(vc, info) || vc->vc_deccm != 1)
return;
@@ -1354,14 +1375,14 @@ static void fbcon_cursor(struct vc_data *vc, bool enable)
else
fbcon_add_cursor_work(info);
- ops->cursor_flash = enable;
+ par->cursor_flash = enable;
- if (!ops->cursor)
+ if (!par->bitops->cursor)
return;
- ops->cursor(vc, info, enable,
- get_fg_color(vc, info, c),
- get_bg_color(vc, info, c));
+ par->bitops->cursor(vc, info, enable,
+ get_fg_color(vc, info, c),
+ get_bg_color(vc, info, c));
}
static int scrollback_phys_max = 0;
@@ -1374,7 +1395,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
struct fbcon_display *p, *t;
struct vc_data **default_mode, *vc;
struct vc_data *svc;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int rows, cols;
unsigned long ret = 0;
@@ -1407,7 +1428,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
var->yoffset = info->var.yoffset;
var->xoffset = info->var.xoffset;
fb_set_var(info, var);
- ops->var = info->var;
+ par->var = info->var;
vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1);
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
if (vc->vc_font.charcount == 256) {
@@ -1423,8 +1444,8 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= vc->vc_font.width;
rows /= vc->vc_font.height;
ret = vc_resize(vc, cols, rows);
@@ -1436,16 +1457,16 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
static __inline__ void ywrap_up(struct vc_data *vc, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll += count;
if (p->yscroll >= p->vrows) /* Deal with wrap */
p->yscroll -= p->vrows;
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode |= FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode |= FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
scrollback_max += count;
if (scrollback_max > scrollback_phys_max)
scrollback_max = scrollback_phys_max;
@@ -1455,16 +1476,16 @@ static __inline__ void ywrap_up(struct vc_data *vc, int count)
static __inline__ void ywrap_down(struct vc_data *vc, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll -= count;
if (p->yscroll < 0) /* Deal with wrap */
p->yscroll += p->vrows;
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode |= FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode |= FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
scrollback_max -= count;
if (scrollback_max < 0)
scrollback_max = 0;
@@ -1475,19 +1496,19 @@ static __inline__ void ypan_up(struct vc_data *vc, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
p->yscroll += count;
if (p->yscroll > p->vrows - vc->vc_rows) {
- ops->bmove(vc, info, p->vrows - vc->vc_rows,
- 0, 0, 0, vc->vc_rows, vc->vc_cols);
+ par->bitops->bmove(vc, info, p->vrows - vc->vc_rows,
+ 0, 0, 0, vc->vc_rows, vc->vc_cols);
p->yscroll -= p->vrows - vc->vc_rows;
}
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode &= ~FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
fbcon_clear_margins(vc, 1);
scrollback_max += count;
if (scrollback_max > scrollback_phys_max)
@@ -1498,7 +1519,7 @@ static __inline__ void ypan_up(struct vc_data *vc, int count)
static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll += count;
@@ -1508,10 +1529,10 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
}
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode &= ~FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
fbcon_clear_margins(vc, 1);
scrollback_max += count;
if (scrollback_max > scrollback_phys_max)
@@ -1523,19 +1544,19 @@ static __inline__ void ypan_down(struct vc_data *vc, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
p->yscroll -= count;
if (p->yscroll < 0) {
- ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
- 0, vc->vc_rows, vc->vc_cols);
+ par->bitops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
+ 0, vc->vc_rows, vc->vc_cols);
p->yscroll += p->vrows - vc->vc_rows;
}
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode &= ~FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
fbcon_clear_margins(vc, 1);
scrollback_max -= count;
if (scrollback_max < 0)
@@ -1546,7 +1567,7 @@ static __inline__ void ypan_down(struct vc_data *vc, int count)
static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll -= count;
@@ -1556,10 +1577,10 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
}
- ops->var.xoffset = 0;
- ops->var.yoffset = p->yscroll * vc->vc_font.height;
- ops->var.vmode &= ~FB_VMODE_YWRAP;
- ops->update_start(info);
+ par->var.xoffset = 0;
+ par->var.yoffset = p->yscroll * vc->vc_font.height;
+ par->var.vmode &= ~FB_VMODE_YWRAP;
+ par->bitops->update_start(info);
fbcon_clear_margins(vc, 1);
scrollback_max -= count;
if (scrollback_max < 0)
@@ -1608,7 +1629,7 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
unsigned short *d = (unsigned short *)
(vc->vc_origin + vc->vc_size_row * line);
unsigned short *s = d + offset;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
while (count--) {
unsigned short *start = s;
@@ -1621,8 +1642,8 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
if (c == scr_readw(d)) {
if (s > start) {
- ops->bmove(vc, info, line + ycount, x,
- line, x, 1, s-start);
+ par->bitops->bmove(vc, info, line + ycount, x,
+ line, x, 1, s - start);
x += s - start + 1;
start = s + 1;
} else {
@@ -1637,8 +1658,8 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
d++;
} while (s < le);
if (s > start)
- ops->bmove(vc, info, line + ycount, x, line, x, 1,
- s-start);
+ par->bitops->bmove(vc, info, line + ycount, x, line, x, 1,
+ s - start);
console_conditional_schedule();
if (ycount > 0)
line++;
@@ -1709,7 +1730,7 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy,
int dy, int dx, int height, int width, u_int y_break)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u_int b;
if (sy < y_break && sy + height > y_break) {
@@ -1743,8 +1764,8 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy,
}
return;
}
- ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
- height, width);
+ par->bitops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
}
static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
@@ -1971,15 +1992,13 @@ static void updatescrollmode_accel(struct fbcon_display *p,
struct vc_data *vc)
{
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int cap = info->flags;
u16 t = 0;
- int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
- info->fix.xpanstep);
- int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
- int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
- info->var.xres_virtual);
+ int ypan = FBCON_SWAP(par->rotate, info->fix.ypanstep, info->fix.xpanstep);
+ int ywrap = FBCON_SWAP(par->rotate, info->fix.ywrapstep, t);
+ int yres = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
+ int vyres = FBCON_SWAP(par->rotate, info->var.yres_virtual, info->var.xres_virtual);
int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
divides(ypan, vc->vc_font.height) && vyres > yres;
int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
@@ -2012,11 +2031,10 @@ static void updatescrollmode(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int fh = vc->vc_font.height;
- int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
- info->var.xres_virtual);
+ int yres = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
+ int vyres = FBCON_SWAP(par->rotate, info->var.yres_virtual, info->var.xres_virtual);
p->vrows = vyres/fh;
if (yres > (fh * (vc->vc_rows + 1)))
@@ -2035,7 +2053,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
unsigned int height, bool from_user)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var = info->var;
int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
@@ -2058,12 +2076,10 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
return -EINVAL;
}
- virt_w = FBCON_SWAP(ops->rotate, width, height);
- virt_h = FBCON_SWAP(ops->rotate, height, width);
- virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
- vc->vc_font.height);
- virt_fh = FBCON_SWAP(ops->rotate, vc->vc_font.height,
- vc->vc_font.width);
+ virt_w = FBCON_SWAP(par->rotate, width, height);
+ virt_h = FBCON_SWAP(par->rotate, height, width);
+ virt_fw = FBCON_SWAP(par->rotate, vc->vc_font.width, vc->vc_font.height);
+ virt_fh = FBCON_SWAP(par->rotate, vc->vc_font.height, vc->vc_font.width);
var.xres = virt_w * virt_fw;
var.yres = virt_h * virt_fh;
x_diff = info->var.xres - var.xres;
@@ -2089,7 +2105,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
fb_set_var(info, &var);
}
var_to_display(p, &info->var, info);
- ops->var = info->var;
+ par->var = info->var;
}
updatescrollmode(p, info, vc);
return 0;
@@ -2098,13 +2114,13 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
static bool fbcon_switch(struct vc_data *vc)
{
struct fb_info *info, *old_info = NULL;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var;
int i, ret, prev_console;
info = fbcon_info_from_console(vc->vc_num);
- ops = info->fbcon_par;
+ par = info->fbcon_par;
if (logo_shown >= 0) {
struct vc_data *conp2 = vc_cons[logo_shown].d;
@@ -2115,7 +2131,7 @@ static bool fbcon_switch(struct vc_data *vc)
logo_shown = FBCON_LOGO_CANSHOW;
}
- prev_console = ops->currcon;
+ prev_console = par->currcon;
if (prev_console != -1)
old_info = fbcon_info_from_console(prev_console);
/*
@@ -2128,9 +2144,9 @@ static bool fbcon_switch(struct vc_data *vc)
*/
fbcon_for_each_registered_fb(i) {
if (fbcon_registered_fb[i]->fbcon_par) {
- struct fbcon_ops *o = fbcon_registered_fb[i]->fbcon_par;
+			struct fbcon_par *fb_par = fbcon_registered_fb[i]->fbcon_par;

-			o->currcon = vc->vc_num;
+			fb_par->currcon = vc->vc_num;
}
memset(&var, 0, sizeof(struct fb_var_screeninfo));
@@ -2144,7 +2160,7 @@ static bool fbcon_switch(struct vc_data *vc)
info->var.activate = var.activate;
var.vmode |= info->var.vmode & ~FB_VMODE_MASK;
fb_set_var(info, &var);
- ops->var = info->var;
+ par->var = info->var;
if (old_info != NULL && (old_info != info ||
info->flags & FBINFO_MISC_ALWAYS_SETPAR)) {
@@ -2161,17 +2177,16 @@ static bool fbcon_switch(struct vc_data *vc)
fbcon_del_cursor_work(old_info);
}
- if (!fbcon_is_active(vc, info) ||
- ops->blank_state != FB_BLANK_UNBLANK)
+ if (!fbcon_is_active(vc, info) || par->blank_state != FB_BLANK_UNBLANK)
fbcon_del_cursor_work(info);
else
fbcon_add_cursor_work(info);
set_blitting_type(vc, info);
- ops->cursor_reset = 1;
+ par->cursor_reset = 1;
- if (ops->rotate_font && ops->rotate_font(info, vc)) {
- ops->rotate = FB_ROTATE_UR;
+ if (par->bitops->rotate_font && par->bitops->rotate_font(info, vc)) {
+ par->rotate = FB_ROTATE_UR;
set_blitting_type(vc, info);
}
@@ -2202,8 +2217,8 @@ static bool fbcon_switch(struct vc_data *vc)
scrollback_current = 0;
if (fbcon_is_active(vc, info)) {
- ops->var.xoffset = ops->var.yoffset = p->yscroll = 0;
- ops->update_start(info);
+ par->var.xoffset = par->var.yoffset = p->yscroll = 0;
+ par->bitops->update_start(info);
}
fbcon_set_palette(vc, color_table);
@@ -2212,7 +2227,7 @@ static bool fbcon_switch(struct vc_data *vc)
if (logo_shown == FBCON_LOGO_DRAW) {
logo_shown = fg_console;
- fb_show_logo(info, ops->rotate);
+ fb_show_logo(info, par->rotate);
update_region(vc,
vc->vc_origin + vc->vc_size_row * vc->vc_top,
vc->vc_size_row * (vc->vc_bottom -
@@ -2241,27 +2256,27 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
bool mode_switch)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
if (mode_switch) {
struct fb_var_screeninfo var = info->var;
- ops->graphics = 1;
+ par->graphics = 1;
if (!blank) {
var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE |
FB_ACTIVATE_KD_TEXT;
fb_set_var(info, &var);
- ops->graphics = 0;
- ops->var = info->var;
+ par->graphics = 0;
+ par->var = info->var;
}
}
if (fbcon_is_active(vc, info)) {
- if (ops->blank_state != blank) {
- ops->blank_state = blank;
+ if (par->blank_state != blank) {
+ par->blank_state = blank;
fbcon_cursor(vc, !blank);
- ops->cursor_flash = (!blank);
+ par->cursor_flash = (!blank);
if (fb_blank(info, blank))
fbcon_generic_blank(vc, info, blank);
@@ -2271,8 +2286,7 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
update_screen(vc);
}
- if (mode_switch || !fbcon_is_active(vc, info) ||
- ops->blank_state != FB_BLANK_UNBLANK)
+ if (mode_switch || !fbcon_is_active(vc, info) || par->blank_state != FB_BLANK_UNBLANK)
fbcon_del_cursor_work(info);
else
fbcon_add_cursor_work(info);
@@ -2283,10 +2297,10 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
static void fbcon_debug_enter(struct vc_data *vc)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->save_graphics = ops->graphics;
- ops->graphics = 0;
+ par->save_graphics = par->graphics;
+ par->graphics = 0;
if (info->fbops->fb_debug_enter)
info->fbops->fb_debug_enter(info);
fbcon_set_palette(vc, color_table);
@@ -2295,9 +2309,9 @@ static void fbcon_debug_enter(struct vc_data *vc)
static void fbcon_debug_leave(struct vc_data *vc)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->graphics = ops->save_graphics;
+ par->graphics = par->save_graphics;
if (info->fbops->fb_debug_leave)
info->fbops->fb_debug_leave(info);
}
@@ -2432,7 +2446,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
const u8 * data, int userfont)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize, ret, old_userfont, old_width, old_height, old_charcount;
u8 *old_data = vc->vc_font.data;
@@ -2458,8 +2472,8 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
if (resize) {
int cols, rows;
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= w;
rows /= h;
ret = vc_resize(vc, cols, rows);
@@ -2658,11 +2672,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
void fbcon_suspended(struct fb_info *info)
{
struct vc_data *vc = NULL;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
- vc = vc_cons[ops->currcon].d;
+ vc = vc_cons[par->currcon].d;
/* Clear cursor, restore saved data */
fbcon_cursor(vc, false);
@@ -2671,27 +2685,27 @@ void fbcon_suspended(struct fb_info *info)
void fbcon_resumed(struct fb_info *info)
{
struct vc_data *vc;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
- vc = vc_cons[ops->currcon].d;
+ vc = vc_cons[par->currcon].d;
update_screen(vc);
}
static void fbcon_modechanged(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
struct fbcon_display *p;
int rows, cols;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
- vc = vc_cons[ops->currcon].d;
+ vc = vc_cons[par->currcon].d;
if (vc->vc_mode != KD_TEXT ||
- fbcon_info_from_console(ops->currcon) != info)
+ fbcon_info_from_console(par->currcon) != info)
return;
p = &fb_display[vc->vc_num];
@@ -2699,8 +2713,8 @@ static void fbcon_modechanged(struct fb_info *info)
if (con_is_visible(vc)) {
var_to_display(p, &info->var, info);
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= vc->vc_font.width;
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
@@ -2709,8 +2723,8 @@ static void fbcon_modechanged(struct fb_info *info)
scrollback_current = 0;
if (fbcon_is_active(vc, info)) {
- ops->var.xoffset = ops->var.yoffset = p->yscroll = 0;
- ops->update_start(info);
+ par->var.xoffset = par->var.yoffset = p->yscroll = 0;
+ par->bitops->update_start(info);
}
fbcon_set_palette(vc, color_table);
@@ -2720,12 +2734,12 @@ static void fbcon_modechanged(struct fb_info *info)
static void fbcon_set_all_vcs(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
struct fbcon_display *p;
int i, rows, cols, fg = -1;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -2742,8 +2756,8 @@ static void fbcon_set_all_vcs(struct fb_info *info)
p = &fb_display[vc->vc_num];
set_blitting_type(vc, info);
var_to_display(p, &info->var, info);
- cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres);
cols /= vc->vc_font.width;
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
@@ -2766,13 +2780,13 @@ EXPORT_SYMBOL(fbcon_update_vcs);
/* let fbcon check if it supports a new screen resolution */
int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
unsigned int i;
WARN_CONSOLE_UNLOCKED();
- if (!ops)
+ if (!par)
return 0;
/* prevent setting a screen size which is smaller than font size */
@@ -3044,15 +3058,14 @@ int fbcon_fb_registered(struct fb_info *info)
void fbcon_fb_blanked(struct fb_info *info, int blank)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct vc_data *vc;
- if (!ops || ops->currcon < 0)
+ if (!par || par->currcon < 0)
return;
- vc = vc_cons[ops->currcon].d;
- if (vc->vc_mode != KD_TEXT ||
- fbcon_info_from_console(ops->currcon) != info)
+ vc = vc_cons[par->currcon].d;
+ if (vc->vc_mode != KD_TEXT || fbcon_info_from_console(par->currcon) != info)
return;
if (con_is_visible(vc)) {
@@ -3061,7 +3074,7 @@ void fbcon_fb_blanked(struct fb_info *info, int blank)
else
do_unblank_screen(0);
}
- ops->blank_state = blank;
+ par->blank_state = blank;
}
void fbcon_new_modelist(struct fb_info *info)
@@ -3251,7 +3264,7 @@ static ssize_t cursor_blink_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct fb_info *info;
- struct fbcon_ops *ops;
+ struct fbcon_par *par;
int idx, blink = -1;
console_lock();
@@ -3261,12 +3274,12 @@ static ssize_t cursor_blink_show(struct device *device,
goto err;
info = fbcon_registered_fb[idx];
- ops = info->fbcon_par;
+ par = info->fbcon_par;
- if (!ops)
+ if (!par)
goto err;
- blink = delayed_work_pending(&ops->cursor_work);
+ blink = delayed_work_pending(&par->cursor_work);
err:
console_unlock();
return sysfs_emit(buf, "%d\n", blink);
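Most of the fbcon.c churn is the mechanical half of the split: accesses to mutable state keep one level of indirection, while blit operations gain a second level through the shared table. The before/after call shape, taken from the call sites in this patch:

	struct fbcon_ops *ops = info->fbcon_par;	/* before */
	ops->cursor(vc, info, enable, fg, bg);

	struct fbcon_par *par = info->fbcon_par;	/* after */
	par->bitops->cursor(vc, info, enable, fg, bg);

The new fbcon_set_bitops() helper selects one of the four static tables by par->rotate, replacing the old scheme of installing the upright defaults and letting fbcon_set_rotate() overwrite them.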
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 4d97e6d8a16a..44ea4ae4bba0 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -51,7 +51,7 @@ struct fbcon_display {
const struct fb_videomode *mode;
};
-struct fbcon_ops {
+struct fbcon_bitops {
void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width);
void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
@@ -65,6 +65,9 @@ struct fbcon_ops {
bool enable, int fg, int bg);
int (*update_start)(struct fb_info *info);
int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
+};
+
+struct fbcon_par {
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
struct delayed_work cursor_work; /* Cursor timer */
struct fb_cursor cursor_state;
@@ -86,7 +89,10 @@ struct fbcon_ops {
u8 *cursor_src;
u32 cursor_size;
u32 fd_size;
+
+ const struct fbcon_bitops *bitops;
};
+
/*
* Attribute Decoding
*/
@@ -106,7 +112,6 @@ struct fbcon_ops {
((s) & 0x400)
#define attr_blink(s) \
((s) & 0x8000)
-
static inline int mono_col(const struct fb_info *info)
{
@@ -186,7 +191,7 @@ static inline u_short fb_scrollmode(struct fbcon_display *fb)
#ifdef CONFIG_FB_TILEBLITTING
extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
-extern void fbcon_set_bitops(struct fbcon_ops *ops);
+extern void fbcon_set_bitops_ur(struct fbcon_par *par);
extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor);
#define FBCON_ATTRIBUTE_UNDERLINE 1
@@ -224,10 +229,4 @@ static inline int get_attribute(struct fb_info *info, u16 c)
(void) (&_r == &_v); \
(i == FB_ROTATE_UR || i == FB_ROTATE_UD) ? _r : _v; })
-#ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION
-extern void fbcon_set_rotate(struct fbcon_ops *ops);
-#else
-#define fbcon_set_rotate(x) do {} while(0)
-#endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */
-
#endif /* _VIDEO_FBCON_H */
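The header change makes the split concrete: struct fbcon_bitops holds the seven blit callbacks and struct fbcon_par holds the mutable per-framebuffer state plus a single const pointer to the table. On a 64-bit build (an assumption for the arithmetic) that trades 7 * 8 = 56 bytes of writable function pointers per fbcon instance for one 8-byte pointer, with the tables themselves shared and read-only.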
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index 89ef4ba7e867..2f394b5a17f7 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -63,9 +63,9 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute,
static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_copyarea area;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
area.sx = sy * vc->vc_font.height;
area.sy = vyres - ((sx + width) * vc->vc_font.width);
@@ -80,9 +80,9 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width, int fg, int bg)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_fillrect region;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
region.color = bg;
region.dx = sy * vc->vc_font.height;
@@ -99,13 +99,13 @@ static inline void ccw_putcs_aligned(struct vc_data *vc, struct fb_info *info,
u32 d_pitch, u32 s_pitch, u32 cellsize,
struct fb_image *image, u8 *buf, u8 *dst)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
u32 idx = (vc->vc_font.height + 7) >> 3;
u8 *src;
while (cnt--) {
- src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize;
+ src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize;
if (attr) {
ccw_update_attr(buf, src, attr, vc);
@@ -130,7 +130,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
int fg, int bg)
{
struct fb_image image;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u32 width = (vc->vc_font.height + 7)/8;
u32 cellsize = width * vc->vc_font.width;
u32 maxcnt = info->pixmap.size/cellsize;
@@ -139,9 +139,9 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
image.fg_color = fg;
@@ -221,28 +221,28 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.height + 7) >> 3, c;
- int y = real_y(ops->p, vc->state.y);
+ int y = real_y(par->p, vc->state.y);
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
cursor.set = 0;
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
- src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
+ src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
- if (ops->cursor_state.image.data != src ||
- ops->cursor_reset) {
- ops->cursor_state.image.data = src;
- cursor.set |= FB_CUR_SETIMAGE;
+ if (par->cursor_state.image.data != src ||
+ par->cursor_reset) {
+ par->cursor_state.image.data = src;
+ cursor.set |= FB_CUR_SETIMAGE;
}
if (attribute) {
@@ -251,49 +251,49 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC);
if (!dst)
return;
- kfree(ops->cursor_data);
- ops->cursor_data = dst;
+ kfree(par->cursor_data);
+ par->cursor_data = dst;
ccw_update_attr(dst, src, attribute, vc);
src = dst;
}
- if (ops->cursor_state.image.fg_color != fg ||
- ops->cursor_state.image.bg_color != bg ||
- ops->cursor_reset) {
- ops->cursor_state.image.fg_color = fg;
- ops->cursor_state.image.bg_color = bg;
+ if (par->cursor_state.image.fg_color != fg ||
+ par->cursor_state.image.bg_color != bg ||
+ par->cursor_reset) {
+ par->cursor_state.image.fg_color = fg;
+ par->cursor_state.image.bg_color = bg;
cursor.set |= FB_CUR_SETCMAP;
}
- if (ops->cursor_state.image.height != vc->vc_font.width ||
- ops->cursor_state.image.width != vc->vc_font.height ||
- ops->cursor_reset) {
- ops->cursor_state.image.height = vc->vc_font.width;
- ops->cursor_state.image.width = vc->vc_font.height;
+ if (par->cursor_state.image.height != vc->vc_font.width ||
+ par->cursor_state.image.width != vc->vc_font.height ||
+ par->cursor_reset) {
+ par->cursor_state.image.height = vc->vc_font.width;
+ par->cursor_state.image.width = vc->vc_font.height;
cursor.set |= FB_CUR_SETSIZE;
}
dx = y * vc->vc_font.height;
dy = vyres - ((vc->state.x + 1) * vc->vc_font.width);
- if (ops->cursor_state.image.dx != dx ||
- ops->cursor_state.image.dy != dy ||
- ops->cursor_reset) {
- ops->cursor_state.image.dx = dx;
- ops->cursor_state.image.dy = dy;
+ if (par->cursor_state.image.dx != dx ||
+ par->cursor_state.image.dy != dy ||
+ par->cursor_reset) {
+ par->cursor_state.image.dx = dx;
+ par->cursor_state.image.dy = dy;
cursor.set |= FB_CUR_SETPOS;
}
- if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
- ops->cursor_reset) {
- ops->cursor_state.hot.x = cursor.hot.y = 0;
+ if (par->cursor_state.hot.x || par->cursor_state.hot.y ||
+ par->cursor_reset) {
+ par->cursor_state.hot.x = cursor.hot.y = 0;
cursor.set |= FB_CUR_SETHOT;
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != ops->p->cursor_shape ||
- ops->cursor_state.mask == NULL ||
- ops->cursor_reset) {
+ vc->vc_cursor_type != par->p->cursor_shape ||
+ par->cursor_state.mask == NULL ||
+ par->cursor_reset) {
char *tmp, *mask = kmalloc_array(w, vc->vc_font.width,
GFP_ATOMIC);
int cur_height, size, i = 0;
@@ -309,13 +309,13 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
return;
}
- kfree(ops->cursor_state.mask);
- ops->cursor_state.mask = mask;
+ kfree(par->cursor_state.mask);
+ par->cursor_state.mask = mask;
- ops->p->cursor_shape = vc->vc_cursor_type;
+ par->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (CUR_SIZE(ops->p->cursor_shape)) {
+ switch (CUR_SIZE(par->p->cursor_shape)) {
case CUR_NONE:
cur_height = 0;
break;
@@ -348,19 +348,19 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
kfree(tmp);
}
- ops->cursor_state.enable = enable && !use_sw;
+ par->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
- cursor.image.fg_color = ops->cursor_state.image.fg_color;
- cursor.image.bg_color = ops->cursor_state.image.bg_color;
- cursor.image.dx = ops->cursor_state.image.dx;
- cursor.image.dy = ops->cursor_state.image.dy;
- cursor.image.height = ops->cursor_state.image.height;
- cursor.image.width = ops->cursor_state.image.width;
- cursor.hot.x = ops->cursor_state.hot.x;
- cursor.hot.y = ops->cursor_state.hot.y;
- cursor.mask = ops->cursor_state.mask;
- cursor.enable = ops->cursor_state.enable;
+ cursor.image.fg_color = par->cursor_state.image.fg_color;
+ cursor.image.bg_color = par->cursor_state.image.bg_color;
+ cursor.image.dx = par->cursor_state.image.dx;
+ cursor.image.dy = par->cursor_state.image.dy;
+ cursor.image.height = par->cursor_state.image.height;
+ cursor.image.width = par->cursor_state.image.width;
+ cursor.hot.x = par->cursor_state.hot.x;
+ cursor.hot.y = par->cursor_state.hot.y;
+ cursor.mask = par->cursor_state.mask;
+ cursor.enable = par->cursor_state.enable;
cursor.image.depth = 1;
cursor.rop = ROP_XOR;
@@ -370,32 +370,37 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (err)
soft_cursor(info, &cursor);
- ops->cursor_reset = 0;
+ par->cursor_reset = 0;
}
static int ccw_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u32 yoffset;
- u32 vyres = GETVYRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
int err;
- yoffset = (vyres - info->var.yres) - ops->var.xoffset;
- ops->var.xoffset = ops->var.yoffset;
- ops->var.yoffset = yoffset;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ yoffset = (vyres - info->var.yres) - par->var.xoffset;
+ par->var.xoffset = par->var.yoffset;
+ par->var.yoffset = yoffset;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
-void fbcon_rotate_ccw(struct fbcon_ops *ops)
+static const struct fbcon_bitops ccw_fbcon_bitops = {
+ .bmove = ccw_bmove,
+ .clear = ccw_clear,
+ .putcs = ccw_putcs,
+ .clear_margins = ccw_clear_margins,
+ .cursor = ccw_cursor,
+ .update_start = ccw_update_start,
+ .rotate_font = fbcon_rotate_font,
+};
+
+void fbcon_set_bitops_ccw(struct fbcon_par *par)
{
- ops->bmove = ccw_bmove;
- ops->clear = ccw_clear;
- ops->putcs = ccw_putcs;
- ops->clear_margins = ccw_clear_margins;
- ops->cursor = ccw_cursor;
- ops->update_start = ccw_update_start;
+ par->bitops = &ccw_fbcon_bitops;
}
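
The refactor above repeats identically for all three rotations: the per-instance function pointers on struct fbcon_ops give way to one shared, read-only struct fbcon_bitops table. A minimal standalone sketch of the pattern, with hypothetical demo_* names standing in for the kernel types:

	#include <stdbool.h>

	/* Hypothetical miniature of the pattern: the mutable per-display
	 * state keeps a single pointer to a shared const ops table instead
	 * of carrying its own full set of function pointers. */
	struct demo_bitops {
		void (*clear)(void *priv);
		void (*cursor)(void *priv, bool enable);
	};

	struct demo_par {
		const struct demo_bitops *bitops;	/* replaces N pointers */
	};

	static void demo_ccw_clear(void *priv) { (void)priv; }
	static void demo_ccw_cursor(void *priv, bool enable)
	{ (void)priv; (void)enable; }

	static const struct demo_bitops demo_ccw_bitops = {
		.clear	= demo_ccw_clear,
		.cursor	= demo_ccw_cursor,
	};

	void demo_set_bitops_ccw(struct demo_par *par)
	{
		par->bitops = &demo_ccw_bitops;	/* one assignment, const data */
	}

Besides shrinking the per-display structure, the const table lets the compiler place the pointers in read-only memory, which is the usual motivation for this kind of ops-table conversion.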
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index b9dac7940fb7..3c3ad3471ec4 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -48,9 +48,9 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute,
static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_copyarea area;
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vxres = GETVXRES(par->p, info);
area.sx = vxres - ((sy + height) * vc->vc_font.height);
area.sy = sx * vc->vc_font.width;
@@ -65,9 +65,9 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width, int fg, int bg)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_fillrect region;
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vxres = GETVXRES(par->p, info);
region.color = bg;
region.dx = vxres - ((sy + height) * vc->vc_font.height);
@@ -84,13 +84,13 @@ static inline void cw_putcs_aligned(struct vc_data *vc, struct fb_info *info,
u32 d_pitch, u32 s_pitch, u32 cellsize,
struct fb_image *image, u8 *buf, u8 *dst)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
u32 idx = (vc->vc_font.height + 7) >> 3;
u8 *src;
while (cnt--) {
- src = ops->fontbuffer + (scr_readw(s++) & charmask)*cellsize;
+ src = par->fontbuffer + (scr_readw(s++) & charmask) * cellsize;
if (attr) {
cw_update_attr(buf, src, attr, vc);
@@ -115,7 +115,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info,
int fg, int bg)
{
struct fb_image image;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u32 width = (vc->vc_font.height + 7)/8;
u32 cellsize = width * vc->vc_font.width;
u32 maxcnt = info->pixmap.size/cellsize;
@@ -124,9 +124,9 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vxres = GETVXRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
image.fg_color = fg;
@@ -204,28 +204,28 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.height + 7) >> 3, c;
- int y = real_y(ops->p, vc->state.y);
+ int y = real_y(par->p, vc->state.y);
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vxres = GETVXRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
cursor.set = 0;
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
- src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
+ src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
- if (ops->cursor_state.image.data != src ||
- ops->cursor_reset) {
- ops->cursor_state.image.data = src;
- cursor.set |= FB_CUR_SETIMAGE;
+ if (par->cursor_state.image.data != src ||
+ par->cursor_reset) {
+ par->cursor_state.image.data = src;
+ cursor.set |= FB_CUR_SETIMAGE;
}
if (attribute) {
@@ -234,49 +234,49 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC);
if (!dst)
return;
- kfree(ops->cursor_data);
- ops->cursor_data = dst;
+ kfree(par->cursor_data);
+ par->cursor_data = dst;
cw_update_attr(dst, src, attribute, vc);
src = dst;
}
- if (ops->cursor_state.image.fg_color != fg ||
- ops->cursor_state.image.bg_color != bg ||
- ops->cursor_reset) {
- ops->cursor_state.image.fg_color = fg;
- ops->cursor_state.image.bg_color = bg;
+ if (par->cursor_state.image.fg_color != fg ||
+ par->cursor_state.image.bg_color != bg ||
+ par->cursor_reset) {
+ par->cursor_state.image.fg_color = fg;
+ par->cursor_state.image.bg_color = bg;
cursor.set |= FB_CUR_SETCMAP;
}
- if (ops->cursor_state.image.height != vc->vc_font.width ||
- ops->cursor_state.image.width != vc->vc_font.height ||
- ops->cursor_reset) {
- ops->cursor_state.image.height = vc->vc_font.width;
- ops->cursor_state.image.width = vc->vc_font.height;
+ if (par->cursor_state.image.height != vc->vc_font.width ||
+ par->cursor_state.image.width != vc->vc_font.height ||
+ par->cursor_reset) {
+ par->cursor_state.image.height = vc->vc_font.width;
+ par->cursor_state.image.width = vc->vc_font.height;
cursor.set |= FB_CUR_SETSIZE;
}
dx = vxres - ((y * vc->vc_font.height) + vc->vc_font.height);
dy = vc->state.x * vc->vc_font.width;
- if (ops->cursor_state.image.dx != dx ||
- ops->cursor_state.image.dy != dy ||
- ops->cursor_reset) {
- ops->cursor_state.image.dx = dx;
- ops->cursor_state.image.dy = dy;
+ if (par->cursor_state.image.dx != dx ||
+ par->cursor_state.image.dy != dy ||
+ par->cursor_reset) {
+ par->cursor_state.image.dx = dx;
+ par->cursor_state.image.dy = dy;
cursor.set |= FB_CUR_SETPOS;
}
- if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
- ops->cursor_reset) {
- ops->cursor_state.hot.x = cursor.hot.y = 0;
+ if (par->cursor_state.hot.x || par->cursor_state.hot.y ||
+ par->cursor_reset) {
+ par->cursor_state.hot.x = cursor.hot.y = 0;
cursor.set |= FB_CUR_SETHOT;
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != ops->p->cursor_shape ||
- ops->cursor_state.mask == NULL ||
- ops->cursor_reset) {
+ vc->vc_cursor_type != par->p->cursor_shape ||
+ par->cursor_state.mask == NULL ||
+ par->cursor_reset) {
char *tmp, *mask = kmalloc_array(w, vc->vc_font.width,
GFP_ATOMIC);
int cur_height, size, i = 0;
@@ -292,13 +292,13 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
return;
}
- kfree(ops->cursor_state.mask);
- ops->cursor_state.mask = mask;
+ kfree(par->cursor_state.mask);
+ par->cursor_state.mask = mask;
- ops->p->cursor_shape = vc->vc_cursor_type;
+ par->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (CUR_SIZE(ops->p->cursor_shape)) {
+ switch (CUR_SIZE(par->p->cursor_shape)) {
case CUR_NONE:
cur_height = 0;
break;
@@ -331,19 +331,19 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
kfree(tmp);
}
- ops->cursor_state.enable = enable && !use_sw;
+ par->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
- cursor.image.fg_color = ops->cursor_state.image.fg_color;
- cursor.image.bg_color = ops->cursor_state.image.bg_color;
- cursor.image.dx = ops->cursor_state.image.dx;
- cursor.image.dy = ops->cursor_state.image.dy;
- cursor.image.height = ops->cursor_state.image.height;
- cursor.image.width = ops->cursor_state.image.width;
- cursor.hot.x = ops->cursor_state.hot.x;
- cursor.hot.y = ops->cursor_state.hot.y;
- cursor.mask = ops->cursor_state.mask;
- cursor.enable = ops->cursor_state.enable;
+ cursor.image.fg_color = par->cursor_state.image.fg_color;
+ cursor.image.bg_color = par->cursor_state.image.bg_color;
+ cursor.image.dx = par->cursor_state.image.dx;
+ cursor.image.dy = par->cursor_state.image.dy;
+ cursor.image.height = par->cursor_state.image.height;
+ cursor.image.width = par->cursor_state.image.width;
+ cursor.hot.x = par->cursor_state.hot.x;
+ cursor.hot.y = par->cursor_state.hot.y;
+ cursor.mask = par->cursor_state.mask;
+ cursor.enable = par->cursor_state.enable;
cursor.image.depth = 1;
cursor.rop = ROP_XOR;
@@ -353,32 +353,37 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (err)
soft_cursor(info, &cursor);
- ops->cursor_reset = 0;
+ par->cursor_reset = 0;
}
static int cw_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
- u32 vxres = GETVXRES(ops->p, info);
+ struct fbcon_par *par = info->fbcon_par;
+ u32 vxres = GETVXRES(par->p, info);
u32 xoffset;
int err;
- xoffset = vxres - (info->var.xres + ops->var.yoffset);
- ops->var.yoffset = ops->var.xoffset;
- ops->var.xoffset = xoffset;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ xoffset = vxres - (info->var.xres + par->var.yoffset);
+ par->var.yoffset = par->var.xoffset;
+ par->var.xoffset = xoffset;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
-void fbcon_rotate_cw(struct fbcon_ops *ops)
+static const struct fbcon_bitops cw_fbcon_bitops = {
+ .bmove = cw_bmove,
+ .clear = cw_clear,
+ .putcs = cw_putcs,
+ .clear_margins = cw_clear_margins,
+ .cursor = cw_cursor,
+ .update_start = cw_update_start,
+ .rotate_font = fbcon_rotate_font,
+};
+
+void fbcon_set_bitops_cw(struct fbcon_par *par)
{
- ops->bmove = cw_bmove;
- ops->clear = cw_clear;
- ops->putcs = cw_putcs;
- ops->clear_margins = cw_clear_margins;
- ops->cursor = cw_cursor;
- ops->update_start = cw_update_start;
+ par->bitops = &cw_fbcon_bitops;
}
diff --git a/drivers/video/fbdev/core/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c
index ec3c883400f7..1562a8f20b4f 100644
--- a/drivers/video/fbdev/core/fbcon_rotate.c
+++ b/drivers/video/fbdev/core/fbcon_rotate.c
@@ -18,34 +18,34 @@
#include "fbcon.h"
#include "fbcon_rotate.h"
-static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
+int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int len, err = 0;
int s_cellsize, d_cellsize, i;
const u8 *src;
u8 *dst;
- if (vc->vc_font.data == ops->fontdata &&
- ops->p->con_rotate == ops->cur_rotate)
+ if (vc->vc_font.data == par->fontdata &&
+ par->p->con_rotate == par->cur_rotate)
goto finished;
- src = ops->fontdata = vc->vc_font.data;
- ops->cur_rotate = ops->p->con_rotate;
+ src = par->fontdata = vc->vc_font.data;
+ par->cur_rotate = par->p->con_rotate;
len = vc->vc_font.charcount;
s_cellsize = ((vc->vc_font.width + 7)/8) *
vc->vc_font.height;
d_cellsize = s_cellsize;
- if (ops->rotate == FB_ROTATE_CW ||
- ops->rotate == FB_ROTATE_CCW)
+ if (par->rotate == FB_ROTATE_CW ||
+ par->rotate == FB_ROTATE_CCW)
d_cellsize = ((vc->vc_font.height + 7)/8) *
vc->vc_font.width;
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
- if (ops->fd_size < d_cellsize * len) {
+ if (par->fd_size < d_cellsize * len) {
dst = kmalloc_array(len, d_cellsize, GFP_KERNEL);
if (dst == NULL) {
@@ -53,15 +53,15 @@ static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
goto finished;
}
- ops->fd_size = d_cellsize * len;
- kfree(ops->fontbuffer);
- ops->fontbuffer = dst;
+ par->fd_size = d_cellsize * len;
+ kfree(par->fontbuffer);
+ par->fontbuffer = dst;
}
- dst = ops->fontbuffer;
- memset(dst, 0, ops->fd_size);
+ dst = par->fontbuffer;
+ memset(dst, 0, par->fd_size);
- switch (ops->rotate) {
+ switch (par->rotate) {
case FB_ROTATE_UD:
for (i = len; i--; ) {
rotate_ud(src, dst, vc->vc_font.width,
@@ -92,20 +92,3 @@ static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
finished:
return err;
}
-
-void fbcon_set_rotate(struct fbcon_ops *ops)
-{
- ops->rotate_font = fbcon_rotate_font;
-
- switch(ops->rotate) {
- case FB_ROTATE_CW:
- fbcon_rotate_cw(ops);
- break;
- case FB_ROTATE_UD:
- fbcon_rotate_ud(ops);
- break;
- case FB_ROTATE_CCW:
- fbcon_rotate_ccw(ops);
- break;
- }
-}
diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h
index 01cbe303b8a2..8cb019e8a9c0 100644
--- a/drivers/video/fbdev/core/fbcon_rotate.h
+++ b/drivers/video/fbdev/core/fbcon_rotate.h
@@ -90,7 +90,19 @@ static inline void rotate_ccw(const char *in, char *out, u32 width, u32 height)
}
}
-extern void fbcon_rotate_cw(struct fbcon_ops *ops);
-extern void fbcon_rotate_ud(struct fbcon_ops *ops);
-extern void fbcon_rotate_ccw(struct fbcon_ops *ops);
+int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc);
+
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION)
+void fbcon_set_bitops_cw(struct fbcon_par *par);
+void fbcon_set_bitops_ud(struct fbcon_par *par);
+void fbcon_set_bitops_ccw(struct fbcon_par *par);
+#else
+static inline void fbcon_set_bitops_cw(struct fbcon_par *par)
+{ }
+static inline void fbcon_set_bitops_ud(struct fbcon_par *par)
+{ }
+static inline void fbcon_set_bitops_ccw(struct fbcon_par *par)
+{ }
+#endif
+
#endif
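
With the rotation entry points stubbed out when CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is disabled, callers no longer need their own #ifdef. A hedged sketch of what a call site can look like, mirroring the removed fbcon_set_rotate() and assuming struct fbcon_par keeps its rotate field as used above (illustrative, not taken from the patch):

	/* Illustrative caller: with rotation support compiled out, the
	 * fbcon_set_bitops_*() calls collapse to empty static inlines and
	 * the whole switch optimizes away. */
	static void demo_select_rotated_bitops(struct fbcon_par *par)
	{
		switch (par->rotate) {
		case FB_ROTATE_CW:
			fbcon_set_bitops_cw(par);
			break;
		case FB_ROTATE_UD:
			fbcon_set_bitops_ud(par);
			break;
		case FB_ROTATE_CCW:
			fbcon_set_bitops_ccw(par);
			break;
		}
	}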
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 0af7913a2abd..6fc30cad5b19 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -48,10 +48,10 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute,
static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_copyarea area;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
area.sy = vyres - ((sy + height) * vc->vc_font.height);
area.sx = vxres - ((sx + width) * vc->vc_font.width);
@@ -66,10 +66,10 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width, int fg, int bg)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
struct fb_fillrect region;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
region.color = bg;
region.dy = vyres - ((sy + height) * vc->vc_font.height);
@@ -86,13 +86,13 @@ static inline void ud_putcs_aligned(struct vc_data *vc, struct fb_info *info,
u32 d_pitch, u32 s_pitch, u32 cellsize,
struct fb_image *image, u8 *buf, u8 *dst)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
u32 idx = vc->vc_font.width >> 3;
u8 *src;
while (cnt--) {
- src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize;
+ src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize;
if (attr) {
ud_update_attr(buf, src, attr, vc);
@@ -119,7 +119,7 @@ static inline void ud_putcs_unaligned(struct vc_data *vc,
struct fb_image *image, u8 *buf,
u8 *dst)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
u32 shift_low = 0, mod = vc->vc_font.width % 8;
u32 shift_high = 8;
@@ -127,7 +127,7 @@ static inline void ud_putcs_unaligned(struct vc_data *vc,
u8 *src;
while (cnt--) {
- src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize;
+ src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize;
if (attr) {
ud_update_attr(buf, src, attr, vc);
@@ -152,7 +152,7 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info,
int fg, int bg)
{
struct fb_image image;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
u32 width = (vc->vc_font.width + 7)/8;
u32 cellsize = width * vc->vc_font.height;
u32 maxcnt = info->pixmap.size/cellsize;
@@ -161,10 +161,10 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info,
u32 mod = vc->vc_font.width % 8, cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
image.fg_color = fg;
@@ -251,29 +251,29 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.width + 7) >> 3, c;
- int y = real_y(ops->p, vc->state.y);
+ int y = real_y(par->p, vc->state.y);
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
- if (!ops->fontbuffer)
+ if (!par->fontbuffer)
return;
cursor.set = 0;
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
- src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
+ src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
- if (ops->cursor_state.image.data != src ||
- ops->cursor_reset) {
- ops->cursor_state.image.data = src;
- cursor.set |= FB_CUR_SETIMAGE;
+ if (par->cursor_state.image.data != src ||
+ par->cursor_reset) {
+ par->cursor_state.image.data = src;
+ cursor.set |= FB_CUR_SETIMAGE;
}
if (attribute) {
@@ -282,49 +282,49 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
if (!dst)
return;
- kfree(ops->cursor_data);
- ops->cursor_data = dst;
+ kfree(par->cursor_data);
+ par->cursor_data = dst;
ud_update_attr(dst, src, attribute, vc);
src = dst;
}
- if (ops->cursor_state.image.fg_color != fg ||
- ops->cursor_state.image.bg_color != bg ||
- ops->cursor_reset) {
- ops->cursor_state.image.fg_color = fg;
- ops->cursor_state.image.bg_color = bg;
+ if (par->cursor_state.image.fg_color != fg ||
+ par->cursor_state.image.bg_color != bg ||
+ par->cursor_reset) {
+ par->cursor_state.image.fg_color = fg;
+ par->cursor_state.image.bg_color = bg;
cursor.set |= FB_CUR_SETCMAP;
}
- if (ops->cursor_state.image.height != vc->vc_font.height ||
- ops->cursor_state.image.width != vc->vc_font.width ||
- ops->cursor_reset) {
- ops->cursor_state.image.height = vc->vc_font.height;
- ops->cursor_state.image.width = vc->vc_font.width;
+ if (par->cursor_state.image.height != vc->vc_font.height ||
+ par->cursor_state.image.width != vc->vc_font.width ||
+ par->cursor_reset) {
+ par->cursor_state.image.height = vc->vc_font.height;
+ par->cursor_state.image.width = vc->vc_font.width;
cursor.set |= FB_CUR_SETSIZE;
}
dy = vyres - ((y * vc->vc_font.height) + vc->vc_font.height);
dx = vxres - ((vc->state.x * vc->vc_font.width) + vc->vc_font.width);
- if (ops->cursor_state.image.dx != dx ||
- ops->cursor_state.image.dy != dy ||
- ops->cursor_reset) {
- ops->cursor_state.image.dx = dx;
- ops->cursor_state.image.dy = dy;
+ if (par->cursor_state.image.dx != dx ||
+ par->cursor_state.image.dy != dy ||
+ par->cursor_reset) {
+ par->cursor_state.image.dx = dx;
+ par->cursor_state.image.dy = dy;
cursor.set |= FB_CUR_SETPOS;
}
- if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
- ops->cursor_reset) {
- ops->cursor_state.hot.x = cursor.hot.y = 0;
+ if (par->cursor_state.hot.x || par->cursor_state.hot.y ||
+ par->cursor_reset) {
+ par->cursor_state.hot.x = cursor.hot.y = 0;
cursor.set |= FB_CUR_SETHOT;
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != ops->p->cursor_shape ||
- ops->cursor_state.mask == NULL ||
- ops->cursor_reset) {
+ vc->vc_cursor_type != par->p->cursor_shape ||
+ par->cursor_state.mask == NULL ||
+ par->cursor_reset) {
char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
int cur_height, size, i = 0;
u8 msk = 0xff;
@@ -332,13 +332,13 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (!mask)
return;
- kfree(ops->cursor_state.mask);
- ops->cursor_state.mask = mask;
+ kfree(par->cursor_state.mask);
+ par->cursor_state.mask = mask;
- ops->p->cursor_shape = vc->vc_cursor_type;
+ par->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (CUR_SIZE(ops->p->cursor_shape)) {
+ switch (CUR_SIZE(par->p->cursor_shape)) {
case CUR_NONE:
cur_height = 0;
break;
@@ -371,19 +371,19 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
mask[i++] = ~msk;
}
- ops->cursor_state.enable = enable && !use_sw;
+ par->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
- cursor.image.fg_color = ops->cursor_state.image.fg_color;
- cursor.image.bg_color = ops->cursor_state.image.bg_color;
- cursor.image.dx = ops->cursor_state.image.dx;
- cursor.image.dy = ops->cursor_state.image.dy;
- cursor.image.height = ops->cursor_state.image.height;
- cursor.image.width = ops->cursor_state.image.width;
- cursor.hot.x = ops->cursor_state.hot.x;
- cursor.hot.y = ops->cursor_state.hot.y;
- cursor.mask = ops->cursor_state.mask;
- cursor.enable = ops->cursor_state.enable;
+ cursor.image.fg_color = par->cursor_state.image.fg_color;
+ cursor.image.bg_color = par->cursor_state.image.bg_color;
+ cursor.image.dx = par->cursor_state.image.dx;
+ cursor.image.dy = par->cursor_state.image.dy;
+ cursor.image.height = par->cursor_state.image.height;
+ cursor.image.width = par->cursor_state.image.width;
+ cursor.hot.x = par->cursor_state.hot.x;
+ cursor.hot.y = par->cursor_state.hot.y;
+ cursor.mask = par->cursor_state.mask;
+ cursor.enable = par->cursor_state.enable;
cursor.image.depth = 1;
cursor.rop = ROP_XOR;
@@ -393,36 +393,41 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
if (err)
soft_cursor(info, &cursor);
- ops->cursor_reset = 0;
+ par->cursor_reset = 0;
}
static int ud_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int xoffset, yoffset;
- u32 vyres = GETVYRES(ops->p, info);
- u32 vxres = GETVXRES(ops->p, info);
+ u32 vyres = GETVYRES(par->p, info);
+ u32 vxres = GETVXRES(par->p, info);
int err;
- xoffset = vxres - info->var.xres - ops->var.xoffset;
- yoffset = vyres - info->var.yres - ops->var.yoffset;
+ xoffset = vxres - info->var.xres - par->var.xoffset;
+ yoffset = vyres - info->var.yres - par->var.yoffset;
if (yoffset < 0)
yoffset += vyres;
- ops->var.xoffset = xoffset;
- ops->var.yoffset = yoffset;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ par->var.xoffset = xoffset;
+ par->var.yoffset = yoffset;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
-void fbcon_rotate_ud(struct fbcon_ops *ops)
+static const struct fbcon_bitops ud_fbcon_bitops = {
+ .bmove = ud_bmove,
+ .clear = ud_clear,
+ .putcs = ud_putcs,
+ .clear_margins = ud_clear_margins,
+ .cursor = ud_cursor,
+ .update_start = ud_update_start,
+ .rotate_font = fbcon_rotate_font,
+};
+
+void fbcon_set_bitops_ud(struct fbcon_par *par)
{
- ops->bmove = ud_bmove;
- ops->clear = ud_clear;
- ops->putcs = ud_putcs;
- ops->clear_margins = ud_clear_margins;
- ops->cursor = ud_cursor;
- ops->update_start = ud_update_start;
+ par->bitops = &ud_fbcon_bitops;
}
diff --git a/drivers/video/fbdev/core/softcursor.c b/drivers/video/fbdev/core/softcursor.c
index 29e5b21cf373..900788c05915 100644
--- a/drivers/video/fbdev/core/softcursor.c
+++ b/drivers/video/fbdev/core/softcursor.c
@@ -21,7 +21,7 @@
int soft_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
unsigned int scan_align = info->pixmap.scan_align - 1;
unsigned int buf_align = info->pixmap.buf_align - 1;
unsigned int i, size, dsize, s_pitch, d_pitch;
@@ -34,19 +34,19 @@ int soft_cursor(struct fb_info *info, struct fb_cursor *cursor)
s_pitch = (cursor->image.width + 7) >> 3;
dsize = s_pitch * cursor->image.height;
- if (dsize + sizeof(struct fb_image) != ops->cursor_size) {
- kfree(ops->cursor_src);
- ops->cursor_size = dsize + sizeof(struct fb_image);
+ if (dsize + sizeof(struct fb_image) != par->cursor_size) {
+ kfree(par->cursor_src);
+ par->cursor_size = dsize + sizeof(struct fb_image);
- ops->cursor_src = kmalloc(ops->cursor_size, GFP_ATOMIC);
- if (!ops->cursor_src) {
- ops->cursor_size = 0;
+ par->cursor_src = kmalloc(par->cursor_size, GFP_ATOMIC);
+ if (!par->cursor_src) {
+ par->cursor_size = 0;
return -ENOMEM;
}
}
- src = ops->cursor_src + sizeof(struct fb_image);
- image = (struct fb_image *)ops->cursor_src;
+ src = par->cursor_src + sizeof(struct fb_image);
+ image = (struct fb_image *)par->cursor_src;
*image = cursor->image;
d_pitch = (s_pitch + scan_align) & ~scan_align;
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index d342b90c42b7..a9db668caf72 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -151,34 +151,38 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
static int tile_update_start(struct fb_info *info)
{
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
int err;
- err = fb_pan_display(info, &ops->var);
- ops->var.xoffset = info->var.xoffset;
- ops->var.yoffset = info->var.yoffset;
- ops->var.vmode = info->var.vmode;
+ err = fb_pan_display(info, &par->var);
+ par->var.xoffset = info->var.xoffset;
+ par->var.yoffset = info->var.yoffset;
+ par->var.vmode = info->var.vmode;
return err;
}
+static const struct fbcon_bitops tile_fbcon_bitops = {
+ .bmove = tile_bmove,
+ .clear = tile_clear,
+ .putcs = tile_putcs,
+ .clear_margins = tile_clear_margins,
+ .cursor = tile_cursor,
+ .update_start = tile_update_start,
+};
+
void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info)
{
struct fb_tilemap map;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_par *par = info->fbcon_par;
- ops->bmove = tile_bmove;
- ops->clear = tile_clear;
- ops->putcs = tile_putcs;
- ops->clear_margins = tile_clear_margins;
- ops->cursor = tile_cursor;
- ops->update_start = tile_update_start;
+ par->bitops = &tile_fbcon_bitops;
- if (ops->p) {
+ if (par->p) {
map.width = vc->vc_font.width;
map.height = vc->vc_font.height;
map.depth = 1;
map.length = vc->vc_font.charcount;
- map.data = ops->p->fontdata;
+ map.data = par->p->fontdata;
info->tileops->fb_settile(info, &map);
}
}
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 811e9238a77c..cf318e3ddb5c 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -115,6 +115,7 @@
#define DP_MAX_LANE_COUNT 0x002
# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_POST_LT_ADJ_REQ_SUPPORTED (1 << 5) /* 1.3 */
# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
# define DP_ENHANCED_FRAME_CAP (1 << 7)
@@ -583,6 +584,7 @@
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
+# define DP_POST_LT_ADJ_REQ_GRANTED (1 << 5) /* 1.3 */
# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
#define DP_TRAINING_PATTERN_SET 0x102
@@ -800,6 +802,7 @@
#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_POST_LT_ADJ_REQ_IN_PROGRESS (1 << 1) /* 1.3 */
#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */
#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */
#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 87caa4f1fdb8..52ce28097015 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -37,6 +37,7 @@ bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
+bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]);
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
@@ -156,6 +157,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_post_lt_adj_req_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x13 &&
+ (dpcd[DP_MAX_LANE_COUNT] & DP_POST_LT_ADJ_REQ_SUPPORTED);
+}
+
+static inline bool
drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x11 &&
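
Taken together, the new DPCD bits and helpers let a driver gate the DP 1.3 Post-LT Adjust sequence on both sink capability and live link status. A hedged usage sketch, not part of the patch; dpcd[] and link_status[] are assumed to have been read from the sink beforehand:

	static bool demo_post_lt_adjust_active(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
					       const u8 link_status[DP_LINK_STATUS_SIZE])
	{
		/* The sink must advertise the DP 1.3 capability bit... */
		if (!drm_dp_post_lt_adj_req_supported(dpcd))
			return false;

		/* ...and currently be requesting adjustment via
		 * DP_LANE_ALIGN_STATUS_UPDATED. */
		return drm_dp_post_lt_adj_req_in_progress(link_status);
	}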
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 76e05930f50e..0ff7ab4aa868 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -1362,6 +1362,13 @@ drm_bridge_get_current_state(struct drm_bridge *bridge)
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
*
+ * The caller is responsible for holding a reference to @bridge via
+ * drm_bridge_get() or equivalent. This function leaves the refcount of
+ * @bridge unmodified.
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the next bridge in the chain after @bridge, or NULL if @bridge is the last.
*/
@@ -1371,7 +1378,7 @@ drm_bridge_get_next_bridge(struct drm_bridge *bridge)
if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain))
return NULL;
- return list_next_entry(bridge, chain_node);
+ return drm_bridge_get(list_next_entry(bridge, chain_node));
}
/**
@@ -1434,15 +1441,61 @@ drm_bridge_chain_get_last_bridge(struct drm_encoder *encoder)
}
/**
- * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain
+ * drm_bridge_get_next_bridge_and_put - Get the next bridge in the chain
+ * and put the previous
+ * @bridge: bridge object
+ *
+ * Same as drm_bridge_get_next_bridge() but additionally puts the @bridge.
+ *
+ * RETURNS:
+ * the next bridge in the chain after @bridge, or NULL if @bridge is the last.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_next_bridge_and_put(struct drm_bridge *bridge)
+{
+ struct drm_bridge *next = drm_bridge_get_next_bridge(bridge);
+
+ drm_bridge_put(bridge);
+
+ return next;
+}
+
+/**
+ * drm_for_each_bridge_in_chain_scoped - iterate over all bridges attached
+ * to an encoder
* @encoder: the encoder to iterate bridges on
* @bridge: a bridge pointer updated to point to the current bridge at each
* iteration
*
* Iterate over all bridges present in the bridge chain attached to @encoder.
+ *
+ * Automatically gets/puts the bridge reference while iterating, and puts
+ * the reference even if returning or breaking in the middle of the loop.
+ */
+#define drm_for_each_bridge_in_chain_scoped(encoder, bridge) \
+ for (struct drm_bridge *bridge __free(drm_bridge_put) = \
+ drm_bridge_chain_get_first_bridge(encoder); \
+ bridge; \
+ bridge = drm_bridge_get_next_bridge_and_put(bridge))
+
+/**
+ * drm_for_each_bridge_in_chain_from - iterate over all bridges starting
+ * from the given bridge
+ * @first_bridge: the bridge to start from
+ * @bridge: a bridge pointer updated to point to the current bridge at each
+ * iteration
+ *
+ * Iterate over all bridges in the encoder chain starting from
+ * @first_bridge, included.
+ *
+ * Automatically gets/puts the bridge reference while iterating, and puts
+ * the reference even if returning or breaking in the middle of the loop.
*/
-#define drm_for_each_bridge_in_chain(encoder, bridge) \
- list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node)
+#define drm_for_each_bridge_in_chain_from(first_bridge, bridge) \
+ for (struct drm_bridge *bridge __free(drm_bridge_put) = \
+ drm_bridge_get(first_bridge); \
+ bridge; \
+ bridge = drm_bridge_get_next_bridge_and_put(bridge))
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
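
The scoped iterator leans on the cleanup attribute (__free(drm_bridge_put)) to drop the iteration reference on every exit path. A hedged consumer sketch with a hypothetical demo_* name:

	/* Illustrative only: walk the chain and hand the caller its own
	 * reference to the first HDMI bridge. Returning from inside the
	 * loop is safe; the loop variable's cleanup handler puts the
	 * iteration reference. */
	static struct drm_bridge *demo_first_hdmi_bridge(struct drm_encoder *encoder)
	{
		drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
			if (bridge->type == DRM_MODE_CONNECTOR_HDMIA)
				return drm_bridge_get(bridge);
		}

		return NULL;
	}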
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 146ca80e35db..bdd845e383ef 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -220,6 +220,7 @@ int drm_client_modeset_check(struct drm_client_dev *client);
int drm_client_modeset_commit_locked(struct drm_client_dev *client);
int drm_client_modeset_commit(struct drm_client_dev *client);
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
+int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index);
/**
* drm_client_for_each_modeset() - Iterate over client modesets
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 92f5db84b9c2..589f7bfe7506 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -107,10 +107,12 @@ struct drm_gem_shmem_object {
#define to_drm_gem_shmem_obj(obj) \
container_of(obj, struct drm_gem_shmem_object, base)
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
size_t size,
struct vfsmount *gemfs);
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
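
The new drm_gem_shmem_init()/drm_gem_shmem_release() pair exposes the formerly create/free-only lifecycle, so a driver can embed the shmem object inside its own BO struct. A hedged sketch under that assumption (demo_bo is hypothetical):

	struct demo_bo {
		struct drm_gem_shmem_object shmem;	/* base object, embedded */
		u32 flags;
	};

	static struct demo_bo *demo_bo_create(struct drm_device *dev, size_t size)
	{
		struct demo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
		int err;

		if (!bo)
			return ERR_PTR(-ENOMEM);

		err = drm_gem_shmem_init(dev, &bo->shmem, size);
		if (err) {
			kfree(bo);
			return ERR_PTR(err);
		}

		/* The matching drm_gem_shmem_release(&bo->shmem) belongs in
		 * the driver's GEM free callback, before kfree(bo). */
		return bo;
	}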
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 323a505e6e6a..fb88301b3c45 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -546,7 +546,7 @@ struct drm_sched_backend_ops {
* @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
* as there's usually one run-queue per priority, but could be less.
* @sched_rq: An allocated array of run-queues of size @num_rqs;
- * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
+ * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
* waits on this wait queue until all the scheduled jobs are
* finished.
* @job_id_count: used to assign unique id to the each job.
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index e664a96540eb..bca3a8849d47 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -391,7 +391,7 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx);
-void ttm_bo_put(struct ttm_buffer_object *bo);
+void ttm_bo_fini(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 160ee1411d4a..e470b0221e02 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -90,6 +90,7 @@ extern "C" {
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12
#define DRM_IVPU_PARAM_CAPABILITIES 13
+#define DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE 14
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
@@ -176,6 +177,9 @@ struct drm_ivpu_param {
*
* %DRM_IVPU_PARAM_CAPABILITIES:
* Supported capabilities (read-only)
+ *
+ * %DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
+ * Size of the preemption buffer (read-only)
*/
__u32 param;
@@ -371,6 +375,13 @@ struct drm_ivpu_cmdq_submit {
* to be executed. The offset has to be 8-byte aligned.
*/
__u32 commands_offset;
+ /**
+ * @preempt_buffer_index:
+ *
+ * Index of the preemption buffer in the buffers_ptr array.
+ */
+ __u32 preempt_buffer_index;
+ __u32 reserved;
};
/* drm_ivpu_bo_wait job status codes */
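
From userspace, the new read-only parameter tells the UMD how large a preemption buffer to allocate before referencing it via preempt_buffer_index. A hedged sketch using the existing GET_PARAM ioctl; the include path and error handling are assumptions, not part of the patch:

	#include <sys/ioctl.h>
	#include <drm/ivpu_accel.h>

	/* Illustrative only: query the required preemption buffer size. A
	 * buffer of at least this size is then placed in buffers_ptr[] and
	 * its index passed as drm_ivpu_cmdq_submit.preempt_buffer_index. */
	static __u64 demo_ivpu_preempt_buf_size(int fd)
	{
		struct drm_ivpu_param param = {
			.param = DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE,
		};

		ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &param);
		return param.value;
	}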
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index ed67510395bd..e8b47c9f6976 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -22,6 +22,8 @@ extern "C" {
#define DRM_PANFROST_PERFCNT_DUMP 0x07
#define DRM_PANFROST_MADVISE 0x08
#define DRM_PANFROST_SET_LABEL_BO 0x09
+#define DRM_PANFROST_JM_CTX_CREATE 0x0a
+#define DRM_PANFROST_JM_CTX_DESTROY 0x0b
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -31,6 +33,8 @@ extern "C" {
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
#define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo)
+#define DRM_IOCTL_PANFROST_JM_CTX_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_CREATE, struct drm_panfrost_jm_ctx_create)
+#define DRM_IOCTL_PANFROST_JM_CTX_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_DESTROY, struct drm_panfrost_jm_ctx_destroy)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -71,6 +75,12 @@ struct drm_panfrost_submit {
/** A combination of PANFROST_JD_REQ_* */
__u32 requirements;
+
+ /** JM context handle. Zero if you want to use the default context. */
+ __u32 jm_ctx_handle;
+
+ /** Padding field. MBZ. */
+ __u32 pad;
};
/**
@@ -177,6 +187,7 @@ enum drm_panfrost_param {
DRM_PANFROST_PARAM_AFBC_FEATURES,
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP,
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY,
+ DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES,
};
struct drm_panfrost_get_param {
@@ -299,6 +310,45 @@ struct panfrost_dump_registers {
__u32 value;
};
+enum drm_panfrost_jm_ctx_priority {
+ /**
+ * @PANFROST_JM_CTX_PRIORITY_LOW: Low priority context.
+ */
+ PANFROST_JM_CTX_PRIORITY_LOW = 0,
+
+ /**
+ * @PANFROST_JM_CTX_PRIORITY_MEDIUM: Medium priority context.
+ */
+ PANFROST_JM_CTX_PRIORITY_MEDIUM,
+
+ /**
+ * @PANFROST_JM_CTX_PRIORITY_HIGH: High priority context.
+ *
+ * Requires CAP_SYS_NICE or DRM_MASTER.
+ */
+ PANFROST_JM_CTX_PRIORITY_HIGH,
+};
+
+struct drm_panfrost_jm_ctx_create {
+ /** @handle: Handle of the created JM context */
+ __u32 handle;
+
+ /** @priority: Context priority (see enum drm_panfrost_jm_ctx_priority). */
+ __u32 priority;
+};
+
+struct drm_panfrost_jm_ctx_destroy {
+ /**
+ * @handle: Handle of the JM context to destroy.
+ *
+	 * Must be a valid context handle returned by DRM_IOCTL_PANFROST_JM_CTX_CREATE.
+ */
+ __u32 handle;
+
+ /** @pad: Padding field, MBZ. */
+ __u32 pad;
+};
+
#if defined(__cplusplus)
}
#endif
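
A hedged userspace sketch of the new JM context lifecycle (error handling elided; the returned handle feeds drm_panfrost_submit.jm_ctx_handle, with 0 still selecting the default context):

	#include <sys/ioctl.h>
	#include <drm/panfrost_drm.h>

	/* Illustrative only: create a medium-priority JM context and later
	 * destroy it. PANFROST_JM_CTX_PRIORITY_HIGH would additionally
	 * require CAP_SYS_NICE or DRM_MASTER. */
	static __u32 demo_jm_ctx_create(int fd)
	{
		struct drm_panfrost_jm_ctx_create args = {
			.priority = PANFROST_JM_CTX_PRIORITY_MEDIUM,
		};

		ioctl(fd, DRM_IOCTL_PANFROST_JM_CTX_CREATE, &args);
		return args.handle;
	}

	static void demo_jm_ctx_destroy(int fd, __u32 handle)
	{
		struct drm_panfrost_jm_ctx_destroy args = { .handle = handle };

		ioctl(fd, DRM_IOCTL_PANFROST_JM_CTX_DESTROY, &args);
	}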