Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 161
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 225
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 178
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 83
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 109
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 227
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 109
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 191
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 94
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 195
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 515
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 10
67 files changed, 2521 insertions(+), 742 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 25a95c95df14..ef9a3b6d7b62 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o amdgpu_vf_error.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ebfc267467ee..cbcb6a153aba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -121,6 +121,7 @@ extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
+extern int amdgpu_compute_multipipe;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -731,10 +732,14 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
struct amdgpu_queue_mgr queue_mgr;
unsigned reset_counter;
+ uint32_t vram_lost_counter;
spinlock_t ring_lock;
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
- bool preamble_presented;
+ bool preamble_presented;
+ enum amd_sched_priority init_priority;
+ enum amd_sched_priority override_priority;
+ struct mutex lock;
};
struct amdgpu_ctx_mgr {
@@ -751,13 +756,18 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ enum amd_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
/*
* file private structure
*/
@@ -769,7 +779,6 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
- u32 vram_lost_counter;
};
/*
@@ -870,7 +879,7 @@ struct amdgpu_mec {
struct amdgpu_kiq {
u64 eop_gpu_addr;
struct amdgpu_bo *eop_obj;
- struct mutex ring_mutex;
+ spinlock_t ring_lock;
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
};
@@ -1034,6 +1043,10 @@ struct amdgpu_gfx {
bool in_suspend;
/* NGG */
struct amdgpu_ngg ngg;
+
+ /* pipe reservation */
+ struct mutex pipe_reserve_mutex;
+ DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1112,6 +1125,7 @@ struct amdgpu_job {
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
+ uint32_t vram_lost_counter;
/* user fence handling */
uint64_t uf_addr;
@@ -1137,7 +1151,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
-#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@@ -1310,6 +1324,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1376,6 +1392,18 @@ struct amdgpu_atcs {
};
/*
+ * Firmware VRAM reservation
+ */
+struct amdgpu_fw_vram_usage {
+ u64 start_offset;
+ u64 size;
+ struct amdgpu_bo *reserved_bo;
+ void *va;
+};
+
+int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
+
+/*
* CGS
*/
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
@@ -1524,7 +1552,6 @@ struct amdgpu_device {
/* powerplay */
struct amd_powerplay powerplay;
- bool pp_enabled;
bool pp_force_state_enabled;
/* dpm */
@@ -1580,6 +1607,8 @@ struct amdgpu_device {
struct delayed_work late_init_work;
struct amdgpu_virt virt;
+ /* firmware VRAM reservation */
+ struct amdgpu_fw_vram_usage fw_vram_usage;
/* link all shadow bo */
struct list_head shadow_list;
@@ -1831,8 +1860,6 @@ static inline bool amdgpu_has_atpx(void) { return false; }
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;
-bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
- struct amdgpu_fpriv *fpriv);
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index a52795d9b458..c04f44a90392 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -35,41 +35,50 @@
#include "acp_gfx_if.h"
-#define ACP_TILE_ON_MASK 0x03
-#define ACP_TILE_OFF_MASK 0x02
-#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
-#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
-
-#define ACP_TILE_P1_MASK 0x3e
-#define ACP_TILE_P2_MASK 0x3d
-#define ACP_TILE_DSP0_MASK 0x3b
-#define ACP_TILE_DSP1_MASK 0x37
-
-#define ACP_TILE_DSP2_MASK 0x2f
-
-#define ACP_DMA_REGS_END 0x146c0
-#define ACP_I2S_PLAY_REGS_START 0x14840
-#define ACP_I2S_PLAY_REGS_END 0x148b4
-#define ACP_I2S_CAP_REGS_START 0x148b8
-#define ACP_I2S_CAP_REGS_END 0x1496c
-
-#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
-#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
-#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
-#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
-
-#define mmACP_PGFSM_RETAIN_REG 0x51c9
-#define mmACP_PGFSM_CONFIG_REG 0x51ca
-#define mmACP_PGFSM_READ_REG_0 0x51cc
-
-#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
-#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
-#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
-#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
-
-#define ACP_TIMEOUT_LOOP 0x000000FF
-#define ACP_DEVS 3
-#define ACP_SRC_ID 162
+#define ACP_TILE_ON_MASK 0x03
+#define ACP_TILE_OFF_MASK 0x02
+#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
+#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
+
+#define ACP_TILE_P1_MASK 0x3e
+#define ACP_TILE_P2_MASK 0x3d
+#define ACP_TILE_DSP0_MASK 0x3b
+#define ACP_TILE_DSP1_MASK 0x37
+
+#define ACP_TILE_DSP2_MASK 0x2f
+
+#define ACP_DMA_REGS_END 0x146c0
+#define ACP_I2S_PLAY_REGS_START 0x14840
+#define ACP_I2S_PLAY_REGS_END 0x148b4
+#define ACP_I2S_CAP_REGS_START 0x148b8
+#define ACP_I2S_CAP_REGS_END 0x1496c
+
+#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
+#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
+#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
+#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+
+#define mmACP_PGFSM_RETAIN_REG 0x51c9
+#define mmACP_PGFSM_CONFIG_REG 0x51ca
+#define mmACP_PGFSM_READ_REG_0 0x51cc
+
+#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
+#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
+#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
+#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
+
+#define mmACP_CONTROL 0x5131
+#define mmACP_STATUS 0x5133
+#define mmACP_SOFT_RESET 0x5134
+#define ACP_CONTROL__ClkEn_MASK 0x1
+#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
+#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
+#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
+#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
+
+#define ACP_TIMEOUT_LOOP 0x000000FF
+#define ACP_DEVS 3
+#define ACP_SRC_ID 162
enum {
ACP_TILE_P1 = 0,
@@ -260,6 +269,8 @@ static int acp_hw_init(void *handle)
{
int r, i;
uint64_t acp_base;
+ u32 val = 0;
+ u32 count = 0;
struct device *dev;
struct i2s_platform_data *i2s_pdata;
@@ -371,6 +382,8 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell[0].name = "acp_audio_dma";
adev->acp.acp_cell[0].num_resources = 4;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
+ adev->acp.acp_cell[0].platform_data = &adev->asic_type;
+ adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
adev->acp.acp_cell[1].num_resources = 1;
@@ -400,6 +413,46 @@ static int acp_hw_init(void *handle)
}
}
+ /* Assert Soft reset of ACP */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+ val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+ count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+ (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Enable clock to ACP and wait until the clock is enabled */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+ val = val | ACP_CONTROL__ClkEn_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+ count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+ if (val & (u32) 0x1)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Deassert the SOFT RESET flags */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
return 0;
}
@@ -412,6 +465,8 @@ static int acp_hw_init(void *handle)
static int acp_hw_fini(void *handle)
{
int i, ret;
+ u32 val = 0;
+ u32 count = 0;
struct device *dev;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -419,6 +474,42 @@ static int acp_hw_fini(void *handle)
if (!adev->acp.acp_cell)
return 0;
+ /* Assert Soft reset of ACP */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+ val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+ count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+ (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Disable ACP clock */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+ val &= ~ACP_CONTROL__ClkEn_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+ count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+ if (val & (u32) 0x1)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+
if (adev->acp.acp_genpd) {
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
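Note: the soft-reset and clock-gate sequences added to acp_hw_init() and acp_hw_fini() above repeat the same bounded register-poll pattern four times. A minimal sketch of how that pattern could be factored out is shown below; the helper name acp_poll_reg_bit() is hypothetical and not part of this patch, only cgs_read_register(), udelay() and the mask/timeout macros come from the code above.

static int acp_poll_reg_bit(struct amdgpu_device *adev, u32 reg, u32 mask,
			    u32 retries)
{
	u32 val;

	while (retries--) {
		val = cgs_read_register(adev->acp.cgs_device, reg);
		if (val & mask)		/* bit reads back as set */
			return 0;
		udelay(100);
	}
	return -ETIMEDOUT;
}

	/* e.g. waiting for the soft-reset acknowledgement: */
	r = acp_poll_reg_bit(adev, mmACP_SOFT_RESET,
			     ACP_SOFT_RESET__SoftResetAudDone_MASK,
			     ACP_SOFT_RESET_DONE_TIME_OUT_VALUE);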
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index dc7e25cce741..47d1c132ac40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -338,6 +338,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct cik_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
+ bool valid_wptr = false;
m = get_mqd(mqd);
@@ -356,7 +357,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- if (read_user_wptr(mm, wptr, wptr_val))
+ /* read_user_wptr() may take the mm->mmap_sem.
+ * release srbm_mutex to avoid circular dependency between
+ * srbm_mutex->mmap_sem->reservation_ww_class_mutex->srbm_mutex.
+ */
+ release_queue(kgd);
+ valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+ acquire_queue(kgd, pipe_id, queue_id);
+ if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index c678c69936a0..056929b8ccd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -292,6 +292,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct vi_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
+ bool valid_wptr = false;
m = get_mqd(mqd);
@@ -339,7 +340,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- if (read_user_wptr(mm, wptr, wptr_val))
+ /* read_user_wptr() may take the mm->mmap_sem.
+ * release srbm_mutex to avoid circular dependency between
+ * srbm_mutex->mmap_sem->reservation_ww_class_mutex->srbm_mutex.
+ */
+ release_queue(kgd);
+ valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+ acquire_queue(kgd, pipe_id, queue_id);
+ if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
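Both kgd_hqd_load() changes above enforce the same lock ordering: read_user_wptr() copies from user space and can fault, which may take mmap_sem, so it must not run while srbm_mutex is held or the srbm_mutex -> mmap_sem -> reservation_ww_class_mutex -> srbm_mutex cycle closes. An annotated recap of the pattern (identical to the code above, comments added for clarity):

	release_queue(kgd);			/* drop srbm_mutex before faulting      */
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);	/* may take mmap_sem    */
	acquire_queue(kgd, pipe_id, queue_id);	/* re-select queue, re-take srbm_mutex  */
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);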
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index ce443586a0c7..f450b69323fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
return true;
}
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
*/
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
- u32 *dst32, *src32;
+ u32 src_tmp[5], dst_tmp[5];
int i;
+ u8 align_num_bytes = ALIGN(num_bytes, 4);
- memcpy(src_tmp, src, num_bytes);
- src32 = (u32 *)src_tmp;
- dst32 = (u32 *)dst_tmp;
if (to_le) {
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = cpu_to_le32(src32[i]);
- memcpy(dst, dst_tmp, num_bytes);
+ memcpy(src_tmp, src, num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+ memcpy(dst, dst_tmp, align_num_bytes);
} else {
- u8 dws = num_bytes & ~3;
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = le32_to_cpu(src32[i]);
- memcpy(dst, dst_tmp, dws);
- if (num_bytes % 4) {
- for (i = 0; i < (num_bytes % 4); i++)
- dst[dws+i] = dst_tmp[dws+i];
- }
+ memcpy(src_tmp, src, align_num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+ memcpy(dst, dst_tmp, num_bytes);
}
#else
memcpy(dst, src, num_bytes);
@@ -1807,6 +1805,8 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
uint16_t data_offset;
int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+ u64 start_addr;
+ u64 size;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
@@ -1815,7 +1815,21 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
- usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
+ size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
+
+ if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware request VRAM reservation for SR-IOV */
+ adev->fw_vram_usage.start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->fw_vram_usage.size = size << 10;
+ /* Use the default scratch size */
+ usage_bytes = 0;
+ } else {
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ }
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
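The rewritten amdgpu_atombios_copy_swap() always swaps in whole dwords, so the buffer that receives the aligned copy must be padded accordingly (and num_bytes is limited to 20 by the on-stack dword buffers). A short worked example on a big-endian host, with illustrative values:

	/* 6 bytes round up to ALIGN(6, 4) == 8 bytes, i.e. two dwords */
	u8 src[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	u8 dst[8];					/* ALIGN(num_bytes, 4) bytes */

	amdgpu_atombios_copy_swap(dst, src, 6, true);	/* to_le: driver -> atom */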
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index f9ffe8ef0cd6..ff8efd0f8fd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -71,19 +71,33 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
struct atom_context *ctx = adev->mode_info.atom_context;
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_usagebyfirmware);
+ struct vram_usagebyfirmware_v2_1 * firmware_usage;
+ uint32_t start_addr, size;
uint16_t data_offset;
int usage_bytes = 0;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
- struct vram_usagebyfirmware_v2_1 *firmware_usage =
- (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
-
+ firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
le32_to_cpu(firmware_usage->start_address_in_kb),
le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
le16_to_cpu(firmware_usage->used_by_driver_in_kb));
- usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) * 1024;
+ start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
+ size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
+
+ if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware request VRAM reservation for SR-IOV */
+ adev->fw_vram_usage.start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->fw_vram_usage.size = size << 10;
+ /* Use the default scratch size */
+ usage_bytes = 0;
+ } else {
+ usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
+ }
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
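Both the legacy atombios parser above and this atomfirmware variant decode the same SR-IOV convention: the firmware packs operation flags into the upper bits of the start address and reports offset/size in kilobytes, which the driver converts to bytes with a left shift by 10 (for example, a reported 256 KB becomes 256 << 10 = 262144 bytes). A condensed sketch of the shared decode step, using the fields introduced in amdgpu.h:

	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
	    (ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
	     ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
		adev->fw_vram_usage.start_offset =
			(start_addr & ~ATOM_VRAM_OPERATION_FLAGS_MASK) << 10; /* KB -> bytes */
		adev->fw_vram_usage.size = size << 10;			       /* KB -> bytes */
	}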
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 383204e911a4..a7afe553e0a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -42,6 +42,28 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
+static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
+ int (*call_back_func)(struct amd_pp_init *, void **))
+{
+ CGS_FUNC_ADEV;
+ struct amd_pp_init pp_init;
+ struct amd_powerplay *amd_pp;
+
+ if (call_back_func == NULL)
+ return NULL;
+
+ amd_pp = &(adev->powerplay);
+ pp_init.chip_family = adev->family;
+ pp_init.chip_id = adev->asic_type;
+ pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
+ pp_init.feature_mask = amdgpu_pp_feature_mask;
+ pp_init.device = cgs_device;
+ if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
+ return NULL;
+
+ return adev->powerplay.pp_handle;
+}
+
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
@@ -1179,6 +1201,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
+ .register_pp_handle = amdgpu_cgs_register_pp_handle,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 126be9e0f05f..8ca3783f2deb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -231,7 +231,7 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -256,7 +256,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -371,7 +371,7 @@ amdgpu_connector_best_single_encoder(struct drm_connector *connector)
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1076,7 +1076,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1133,7 +1133,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1152,7 +1152,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1293,7 +1293,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1322,7 +1322,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index c6a214f1e991..f7fceb63413c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -25,6 +25,7 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/pagemap.h>
+#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -89,12 +90,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
- goto put_ctx;
+ goto free_chunk;
}
p->nchunks = cs->in.num_chunks;
@@ -102,7 +105,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
GFP_KERNEL);
if (!p->chunks) {
ret = -ENOMEM;
- goto put_ctx;
+ goto free_chunk;
}
for (i = 0; i < p->nchunks; i++) {
@@ -169,6 +172,11 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (ret)
goto free_all_kdata;
+ if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
+ ret = -ECANCELED;
+ goto free_all_kdata;
+ }
+
if (p->uf_entry.robj)
p->job->uf_addr = uf_offset;
kfree(chunk_array);
@@ -182,8 +190,6 @@ free_partial_kdata:
kfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
-put_ctx:
- amdgpu_ctx_put(p->ctx);
free_chunk:
kfree(chunk_array);
@@ -704,7 +710,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) {
struct reservation_object *resv = e->robj->tbo.resv;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
+ amdgpu_bo_explicit_sync(e->robj));
if (r)
return r;
@@ -735,8 +742,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence);
- if (parser->ctx)
+ if (parser->ctx) {
+ mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
+ }
if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list);
@@ -843,14 +852,58 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_ring *ring = p->job->ring;
- int i, r;
+ int r;
/* Only for UVD/VCE VM emulation */
- if (ring->funcs->parse_cs) {
- for (i = 0; i < p->job->num_ibs; i++) {
- r = amdgpu_ring_parse_cs(ring, p, i);
+ if (p->job->ring->funcs->parse_cs) {
+ unsigned i, j;
+
+ for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib;
+ struct amdgpu_bo_va_mapping *m;
+ struct amdgpu_bo *aobj = NULL;
+ struct amdgpu_cs_chunk *chunk;
+ struct amdgpu_ib *ib;
+ uint64_t offset;
+ uint8_t *kptr;
+
+ chunk = &p->chunks[i];
+ ib = &p->job->ibs[j];
+ chunk_ib = chunk->kdata;
+
+ if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+ continue;
+
+ r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
+ &aobj, &m);
+ if (r) {
+ DRM_ERROR("IB va_start is invalid\n");
+ return r;
+ }
+
+ if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ return -EINVAL;
+ }
+
+ /* the IB should be reserved at this point */
+ r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+ if (r) {
+ return r;
+ }
+
+ offset = m->start * AMDGPU_GPU_PAGE_SIZE;
+ kptr += chunk_ib->va_start - offset;
+
+ memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, j);
if (r)
return r;
+
+ j++;
}
}
@@ -917,54 +970,18 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring = ring;
- if (ring->funcs->parse_cs) {
- struct amdgpu_bo_va_mapping *m;
- struct amdgpu_bo *aobj = NULL;
- uint64_t offset;
- uint8_t *kptr;
-
- r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
- &aobj, &m);
- if (r) {
- DRM_ERROR("IB va_start is invalid\n");
- return r;
- }
-
- if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
- (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
- return -EINVAL;
- }
-
- /* the IB should be reserved at this point */
- r = amdgpu_bo_kmap(aobj, (void **)&kptr);
- if (r) {
- return r;
- }
-
- offset = m->start * AMDGPU_GPU_PAGE_SIZE;
- kptr += chunk_ib->va_start - offset;
-
- r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
-
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
- } else {
- r = amdgpu_ib_get(adev, vm, 0, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
-
+ r = amdgpu_ib_get(adev, vm,
+ ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
+ ib);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
}
ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
+
j++;
}
@@ -974,7 +991,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
- return 0;
+ return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1175,6 +1192,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->uf_sequence = seq;
amdgpu_job_free_resources(job);
+ amdgpu_ring_priority_get(job->ring,
+ amd_sched_get_job_priority(&job->base));
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
@@ -1188,7 +1207,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
@@ -1196,8 +1214,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!adev->accel_working)
return -EBUSY;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
parser.adev = adev;
parser.filp = filp;
@@ -1208,6 +1224,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
}
+ r = amdgpu_cs_ib_fill(adev, &parser);
+ if (r)
+ goto out;
+
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
@@ -1218,9 +1238,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, &parser);
- if (r)
- goto out;
r = amdgpu_cs_dependencies(adev, &parser);
if (r) {
@@ -1256,16 +1273,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
long r;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
-
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
@@ -1283,6 +1296,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
r = PTR_ERR(fence);
else if (fence) {
r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r > 0 && fence->error)
+ r = fence->error;
dma_fence_put(fence);
} else
r = 1;
@@ -1330,6 +1345,62 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
return fence;
}
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ union drm_amdgpu_fence_to_handle *info = data;
+ struct dma_fence *fence;
+ struct drm_syncobj *syncobj;
+ struct sync_file *sync_file;
+ int fd, r;
+
+ fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ switch (info->in.what) {
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
+ r = drm_syncobj_create(&syncobj, 0, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
+ drm_syncobj_put(syncobj);
+ return r;
+
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
+ r = drm_syncobj_create(&syncobj, 0, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
+ drm_syncobj_put(syncobj);
+ return r;
+
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ dma_fence_put(fence);
+ return fd;
+ }
+
+ sync_file = sync_file_create(fence);
+ dma_fence_put(fence);
+ if (!sync_file) {
+ put_unused_fd(fd);
+ return -ENOMEM;
+ }
+
+ fd_install(fd, sync_file->file);
+ info->out.handle = fd;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* amdgpu_cs_wait_all_fence - wait on all fences to signal
*
@@ -1364,6 +1435,9 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
if (r == 0)
break;
+
+ if (fence->error)
+ return fence->error;
}
memset(wait, 0, sizeof(*wait));
@@ -1424,7 +1498,7 @@ out:
wait->out.status = (r > 0);
wait->out.first_signaled = first;
/* set return value 0 to indicate success */
- r = 0;
+ r = array[first]->error;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
@@ -1445,15 +1519,12 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
/* Get the fences from userspace */
fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
GFP_KERNEL);
@@ -1511,14 +1582,14 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
return -EINVAL;
- r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
- if (unlikely(r))
- return r;
-
- if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- return 0;
+ if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
+ false);
+ if (r)
+ return r;
+ }
- (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
- return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
+ return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
}
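The submission path now relies on per-context VRAM-lost tracking instead of the removed per-fd amdgpu_kms_vram_lost() check: amdgpu_ctx_init() samples adev->vram_lost_counter when the context is created, and amdgpu_cs_parser_init() compares that snapshot against the job's counter. A brief recap of the resulting behaviour (code as in the hunk above, comments added for clarity):

	/* The counter was sampled at context creation; if the device lost
	 * VRAM since then, only this context is cancelled - other clients
	 * keep submitting instead of the whole fd returning -ENODEV.
	 */
	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;	/* userspace should recreate its context */
		goto free_all_kdata;
	}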
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 75c933b1a432..c184468e2b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -23,13 +23,41 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_auth.h>
#include "amdgpu.h"
+#include "amdgpu_sched.h"
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+ enum amd_sched_priority priority)
+{
+ /* NORMAL and below are accessible by everyone */
+ if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+ enum amd_sched_priority priority,
+ struct drm_file *filp,
+ struct amdgpu_ctx *ctx)
{
unsigned i, j;
int r;
+ if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+ return -EINVAL;
+
+ r = amdgpu_ctx_priority_permit(filp, priority);
+ if (r)
+ return r;
+
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
kref_init(&ctx->refcount);
@@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
if (!ctx->fences)
return -ENOMEM;
+ mutex_init(&ctx->lock);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ ctx->init_priority = priority;
+ ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
struct amd_sched_rq *rq;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+ rq = &ring->sched.sched_rq[priority];
if (ring == &adev->gfx.kiq.ring)
continue;
@@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
&ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+ mutex_destroy(&ctx->lock);
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
+ struct drm_file *filp,
+ enum amd_sched_priority priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
kfree(ctx);
return r;
}
+
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, ctx);
+ r = amdgpu_ctx_init(adev, priority, filp, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
@@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
+ enum amd_sched_priority priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@@ -200,10 +239,16 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
r = 0;
id = args->in.ctx_id;
+ priority = amdgpu_to_sched_priority(args->in.priority);
+
+ /* For backwards compatibility reasons, we need to accept
+ * ioctls with garbage in the priority field */
+ if (priority == AMD_SCHED_PRIORITY_INVALID)
+ priority = AMD_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
- r = amdgpu_ctx_alloc(adev, fpriv, &id);
+ r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
@@ -256,12 +301,8 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
- if (other) {
- signed long r;
- r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
- }
+ if (other)
+ BUG_ON(!dma_fence_is_signaled(other));
dma_fence_get(fence);
@@ -305,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return fence;
}
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ enum amd_sched_priority priority)
+{
+ int i;
+ struct amdgpu_device *adev = ctx->adev;
+ struct amd_sched_rq *rq;
+ struct amd_sched_entity *entity;
+ struct amdgpu_ring *ring;
+ enum amd_sched_priority ctx_prio;
+
+ ctx->override_priority = priority;
+
+ ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ ring = adev->rings[i];
+ entity = &ctx->rings[i].entity;
+ rq = &ring->sched.sched_rq[ctx_prio];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ continue;
+
+ amd_sched_entity_set_rq(entity, rq);
+ }
+}
+
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+{
+ struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+ unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
+ struct dma_fence *other = cring->fences[idx];
+
+ if (other) {
+ signed long r;
+ r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0) {
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);
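Two rules from the new context-priority code above are worth spelling out. First, amdgpu_ctx_priority_permit() only gates priorities above NORMAL: an unprivileged client can always create NORMAL or lower contexts, while higher priorities require CAP_SYS_NICE or DRM-master status, otherwise creation fails with -EACCES. Second, the override set through amdgpu_ctx_priority_override() wins over the priority requested at creation time unless it is UNSET:

	/* effective priority used to pick the scheduler run queue */
	enum amd_sched_priority ctx_prio =
		(ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	rq = &ring->sched.sched_rq[ctx_prio];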
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3e84ddf9e3b5..efcacb827de7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -56,6 +56,7 @@
#include "amdgpu_vf_error.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_pm.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -108,10 +109,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{
uint32_t ret;
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
- BUG_ON(in_interrupt());
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_rreg(adev, reg);
- }
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -136,10 +135,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
adev->last_mm_index = v;
}
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
- BUG_ON(in_interrupt());
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_wreg(adev, reg, v);
- }
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -549,7 +546,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
- *wb = offset * 8; /* convert to dw offset */
+ *wb = offset << 3; /* convert to dw offset */
return 0;
} else {
return -EINVAL;
@@ -567,7 +564,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
if (wb < adev->wb.num_wb)
- __clear_bit(wb, adev->wb.used);
+ __clear_bit(wb >> 3, adev->wb.used);
}
/**
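The two writeback fixes above keep allocation and free symmetric: amdgpu_wb_get() converts the allocated bit index into a dword offset (<< 3), so amdgpu_wb_free() has to shift that offset back (>> 3) before clearing the bit. A small worked example:

	/* bit 5 of adev->wb.used gets allocated:
	 *   amdgpu_wb_get()    -> *wb = 5 << 3 = 40   (dword offset handed to callers)
	 *   amdgpu_wb_free(40) -> __clear_bit(40 >> 3 = 5, ...)
	 * The old code cleared bit 40 instead, so slot 5 was never released
	 * and an unrelated slot's bookkeeping was corrupted.
	 */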
@@ -657,42 +654,96 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
}
/*
- * GPU helpers function.
+ * Firmware Reservation functions
*/
/**
- * amdgpu_need_post - check if the hw need post or not
+ * amdgpu_fw_reserve_vram_fini - free fw reserved vram
*
* @adev: amdgpu_device pointer
*
- * Check if the asic has been initialized (all asics) at driver startup
- * or post is needed if hw reset is performed.
- * Returns true if need or false if not.
+ * free fw reserved vram if it has been reserved.
*/
-bool amdgpu_need_post(struct amdgpu_device *adev)
+void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
- uint32_t reg;
+ amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
+ NULL, &adev->fw_vram_usage.va);
+}
- if (adev->has_hw_reset) {
- adev->has_hw_reset = false;
- return true;
- }
+/**
+ * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from fw.
+ */
+int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+ u64 gpu_addr;
+ u64 vram_size = adev->mc.visible_vram_size;
- /* bios scratch used on CIK+ */
- if (adev->asic_type >= CHIP_BONAIRE)
- return amdgpu_atombios_scratch_need_asic_init(adev);
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
- /* check MEM_SIZE for older asics */
- reg = amdgpu_asic_get_config_memsize(adev);
+ if (adev->fw_vram_usage.size > 0 &&
+ adev->fw_vram_usage.size <= vram_size) {
- if ((reg != 0) && (reg != 0xffffffff))
- return false;
+ r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
+ PAGE_SIZE, true, 0,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
+ &adev->fw_vram_usage.reserved_bo);
+ if (r)
+ goto error_create;
- return true;
+ r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+ if (r)
+ goto error_reserve;
+ r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ adev->fw_vram_usage.start_offset,
+ (adev->fw_vram_usage.start_offset +
+ adev->fw_vram_usage.size), &gpu_addr);
+ if (r)
+ goto error_pin;
+ r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+ if (r)
+ goto error_kmap;
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+ }
+ return r;
+
+error_kmap:
+ amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+error_pin:
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+error_reserve:
+ amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+error_create:
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+ return r;
}
-static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+
+/*
+ * GPU helpers function.
+ */
+/**
+ * amdgpu_need_post - check if the hw need post or not
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check if the asic has been initialized (all asics) at driver startup
+ * or post is needed if hw reset is performed.
+ * Returns true if need or false if not.
+ */
+bool amdgpu_need_post(struct amdgpu_device *adev)
{
+ uint32_t reg;
+
if (amdgpu_sriov_vf(adev))
return false;
@@ -715,7 +766,23 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return true;
}
}
- return amdgpu_need_post(adev);
+
+ if (adev->has_hw_reset) {
+ adev->has_hw_reset = false;
+ return true;
+ }
+
+ /* bios scratch used on CIK+ */
+ if (adev->asic_type >= CHIP_BONAIRE)
+ return amdgpu_atombios_scratch_need_asic_init(adev);
+
+ /* check MEM_SIZE for older asics */
+ reg = amdgpu_asic_get_config_memsize(adev);
+
+ if ((reg != 0) && (reg != 0xffffffff))
+ return false;
+
+ return true;
}
/**
@@ -1879,6 +1946,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
+ AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
@@ -1964,12 +2032,17 @@ static int amdgpu_resume(struct amdgpu_device *adev)
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
- if (adev->is_atom_fw) {
- if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
- } else {
- if (amdgpu_atombios_has_gpu_virtualization_table(adev))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ if (amdgpu_sriov_vf(adev)) {
+ if (adev->is_atom_fw) {
+ if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ } else {
+ if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ }
+
+ if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
}
}
@@ -2010,6 +2083,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+ bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -2038,8 +2112,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex);
+ mutex_init(&adev->gfx.pipe_reserve_mutex);
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
+ mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
amdgpu_check_arguments(adev);
@@ -2125,7 +2201,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2133,10 +2209,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (amdgpu_vpost_needed(adev)) {
+ if (amdgpu_need_post(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
r = -EINVAL;
goto failed;
}
@@ -2144,7 +2219,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
goto failed;
}
} else {
@@ -2156,7 +2230,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atomfirmware_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed;
}
} else {
@@ -2164,7 +2238,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed;
}
/* init i2c buses */
@@ -2175,7 +2249,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2185,7 +2259,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
amdgpu_fini(adev);
goto failed;
}
@@ -2205,7 +2279,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
goto failed;
}
@@ -2213,8 +2287,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
amdgpu_fbdev_init(adev);
+ r = amdgpu_pm_sysfs_init(adev);
+ if (r)
+ DRM_ERROR("registering pm debugfs failed (%d).\n", r);
+
r = amdgpu_gem_debugfs_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -2254,7 +2335,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
@@ -2286,6 +2367,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
+ amdgpu_fw_reserve_vram_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
@@ -2311,6 +2393,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
iounmap(adev->rmmio);
adev->rmmio = NULL;
amdgpu_doorbell_fini(adev);
+ amdgpu_pm_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2537,6 +2620,9 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
int i;
bool asic_hang = false;
+ if (amdgpu_sriov_vf(adev))
+ return true;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -2936,7 +3022,6 @@ out:
}
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread);
@@ -2950,7 +3035,6 @@ out:
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
}
else {
dev_info(adev->dev, "GPU reset succeeded!\n");
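The firmware VRAM reservation added in this file pins a kernel BO over exactly the range requested by the vBIOS tables parsed earlier (fw_vram_usage.start_offset/size) and unwinds through the error labels if any step fails. A minimal usage sketch; the assumption that the init call happens from the memory-controller setup path once adev->mc.visible_vram_size is known is mine, the exact call site is outside this diff:

	r = amdgpu_fw_reserve_vram_init(adev);
	if (r)
		return r;		/* requested range could not be pinned */
	/* adev->fw_vram_usage.va now maps the reservation, or stays NULL
	 * when the vBIOS asked for nothing */

	/* teardown is the mirror image, called from amdgpu_device_fini(): */
	amdgpu_fw_reserve_vram_fini(adev);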
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index f79f9ea58b17..7279fb5c3abc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -356,6 +356,10 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->switch_power_profile(\
(adev)->powerplay.pp_handle, type))
+#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
+ ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
+ (adev)->powerplay.pp_handle, msg_id))
+
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
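The new wrapper simply forwards a clock-gating request to the powerplay backend through pp_funcs. A hedged usage sketch; guarding against a missing callback is my addition, and the msg_id encoding comes from the powerplay headers rather than this diff:

	if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);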
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4f98960e47f9..dd2f060d62a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -70,9 +70,12 @@
* - 3.18.0 - Export gpu always on cu bitmap
* - 3.19.0 - Add support for UVD MJPEG decode
* - 3.20.0 - Add support for local BOs
+ * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
+ * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
+ * - 3.23.0 - Add query for VRAM lost counter
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 20
+#define KMS_DRIVER_MINOR 23
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -122,6 +125,7 @@ int amdgpu_cntl_sb_buf_per_se = 0;
int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
+int amdgpu_compute_multipipe = -1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -265,6 +269,9 @@ module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);
+MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 333bad749067..fb9f88ef6059 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -169,6 +169,32 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
}
/**
+ * amdgpu_fence_emit_polling - emit a fence on the requested ring
+ *
+ * @ring: ring the fence is associated with
+ * @s: resulting sequence number
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Used for fence polling.
+ * Returns 0 on success, -EINVAL if @s is NULL.
+ */
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
+{
+ uint32_t seq;
+
+ if (!s)
+ return -EINVAL;
+
+ seq = ++ring->fence_drv.sync_seq;
+ amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+ seq, AMDGPU_FENCE_FLAG_INT);
+
+ *s = seq;
+
+ return 0;
+}
+
+/**
* amdgpu_fence_schedule_fallback - schedule fallback check
*
* @ring: pointer to struct amdgpu_ring
@@ -282,6 +308,30 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
}
/**
+ * amdgpu_fence_wait_polling - busy wait for a given sequence number
+ *
+ * @ring: ring the fence is associated with
+ * @wait_seq: sequence number to wait for
+ * @timeout: the timeout for waiting in usecs
+ *
+ * Wait for the requested sequence number to be signaled (all asics).
+ * Returns the remaining timeout if the sequence signaled in time, 0 otherwise.
+ */
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+ uint32_t wait_seq,
+ signed long timeout)
+{
+ uint32_t seq;
+
+ do {
+ seq = amdgpu_fence_read(ring);
+ udelay(5);
+ timeout -= 5;
+ } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
+
+ return timeout > 0 ? timeout : 0;
+}
+/**
* amdgpu_fence_count_emitted - get the count of emitted fences
*
* @ring: ring the fence is associated with
@@ -641,6 +691,19 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
atomic_read(&ring->fence_drv.last_seq));
seq_printf(m, "Last emitted 0x%08x\n",
ring->fence_drv.sync_seq);
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
+ continue;
+
+ /* set in CP_VMID_PREEMPT and preemption occurred */
+ seq_printf(m, "Last preempted 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
+ /* set in CP_VMID_RESET and reset occurred */
+ seq_printf(m, "Last reset 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
+ /* Both preemption and reset occurred */
+ seq_printf(m, "Last both 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
}
return 0;
}
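Editorial sketch (not part of the patch) showing how the new polling pair is meant to be used together; it mirrors the KIQ register-access conversion in amdgpu_virt.c later in this patch, and the ring pointer and timeout value are placeholders.

        uint32_t seq;
        signed long r;

        amdgpu_ring_alloc(ring, 32);
        /* ... emit the actual packets here ... */
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);

        /* busy-wait instead of sleeping on a dma_fence */
        r = amdgpu_fence_wait_polling(ring, seq, 100000 /* usecs */);
        if (r < 1)
                DRM_ERROR("polled fence timed out\n");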
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index f4370081f6e6..fe818501c520 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -332,12 +332,13 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
adev->gart.pages[p] = pagelist[i];
#endif
- if (adev->gart.ptr) {
- r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
- adev->gart.ptr);
- if (r)
- return r;
- }
+ if (!adev->gart.ptr)
+ return 0;
+
+ r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+ adev->gart.ptr);
+ if (r)
+ return r;
mb();
amdgpu_gart_flush_gpu_tlb(adev, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b0d45c8e6bb3..fb72edc4c026 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -212,7 +212,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_VM_ALWAYS_VALID))
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
return -EINVAL;
/* reject invalid gem domains */
@@ -577,11 +579,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation);
return -EINVAL;
}
- if ((args->operation == AMDGPU_VA_OP_MAP) ||
- (args->operation == AMDGPU_VA_OP_REPLACE)) {
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
- }
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 4fcd98e65998..ef043361009f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -109,9 +109,26 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
}
}
+static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
+{
+ if (amdgpu_compute_multipipe != -1) {
+ DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+ amdgpu_compute_multipipe);
+ return amdgpu_compute_multipipe == 1;
+ }
+
+ /* FIXME: spreading the queues across pipes causes perf regressions
+ * on POLARIS11 compute workloads */
+ if (adev->asic_type == CHIP_POLARIS11)
+ return false;
+
+ return adev->gfx.mec.num_mec > 1;
+}
+
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe, mec;
+ bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
/* policy for amdgpu compute queue ownership */
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
@@ -125,8 +142,7 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;
- /* FIXME: spreading the queues across pipes causes perf regressions */
- if (0) {
+ if (multipipe_policy) {
/* policy: amdgpu owns the first two queues of the first MEC */
if (mec == 0 && queue < 2)
set_bit(i, adev->gfx.mec.queue_bitmap);
@@ -185,7 +201,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
int r = 0;
- mutex_init(&kiq->ring_mutex);
+ spin_lock_init(&kiq->ring_lock);
r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 0d15eb7d31d7..33535d347734 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -169,7 +169,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
int r;
spin_lock(&mgr->lock);
- if (atomic64_read(&mgr->available) < mem->num_pages) {
+ if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
+ atomic64_read(&mgr->available) < mem->num_pages) {
spin_unlock(&mgr->lock);
return 0;
}
@@ -244,8 +245,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
+ s64 result = man->size - atomic64_read(&mgr->available);
- return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
+ return (result > 0 ? result : 0) * PAGE_SIZE;
}
/**
@@ -265,7 +267,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
- drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+ drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
man->size, (u64)atomic64_read(&mgr->available),
amdgpu_gtt_mgr_usage(man) >> 20);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4510627ae83e..0cfc68db575b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -65,6 +65,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_create(&(*job)->dep_sync);
amdgpu_sync_create(&(*job)->sched_sync);
+ (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
return 0;
}
@@ -103,6 +104,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->dep_sync);
@@ -139,6 +141,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
+ amdgpu_ring_priority_get(job->ring,
+ amd_sched_get_job_priority(&job->base));
amd_sched_entity_push_job(&job->base);
return 0;
@@ -177,8 +181,8 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_device *adev;
struct amdgpu_job *job;
- struct amdgpu_fpriv *fpriv = NULL;
int r;
if (!sched_job) {
@@ -186,23 +190,25 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return NULL;
}
job = to_amdgpu_job(sched_job);
+ adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- if (job->vm)
- fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
/* skip ib schedule when vram is lost */
- if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
+ if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
+ dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
DRM_ERROR("Skip scheduling IBs!\n");
- else {
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
+ } else {
+ r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+ &fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
}
/* if gpu reset, hw fence will be replaced here */
dma_fence_put(job->fence);
job->fence = dma_fence_get(fence);
+
amdgpu_job_free_resources(job);
return fence;
}
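Editorial note with a small sketch (not part of the patch): because the scheduler fence now carries -ECANCELED when a job is skipped after VRAM loss, a waiter can detect the condition through the standard dma_fence status API; the variable names are illustrative.

        long r = dma_fence_wait_timeout(fence, true, timeout);

        if (r > 0 && dma_fence_get_status(fence) == -ECANCELED) {
                /* the job was dropped because VRAM contents were lost */
        }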
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 4fd06f8d9768..6f0b26dae3b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
+#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -269,7 +270,6 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -282,8 +282,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
@@ -765,6 +763,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_VRAM_LOST_COUNTER:
+ ui32 = atomic_read(&adev->vram_lost_counter);
+ return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -791,12 +792,6 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
vga_switcheroo_process_delayed_switch();
}
-bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
- struct amdgpu_fpriv *fpriv)
-{
- return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
-}
-
/**
* amdgpu_driver_open_kms - drm callback for open
*
@@ -853,7 +848,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
- fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
file_priv->driver_priv = fpriv;
out_suspend:
@@ -1023,7 +1017,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
/* KMS */
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
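Editorial sketch (not part of the patch) of the new query from userspace; it assumes libdrm's amdgpu_query_info() helper and the AMDGPU_INFO_VRAM_LOST_COUNTER define added to amdgpu_drm.h by the corresponding UAPI change.

        uint32_t vram_lost = 0;

        if (!amdgpu_query_info(dev, AMDGPU_INFO_VRAM_LOST_COUNTER,
                               sizeof(vram_lost), &vram_lost)) {
                /* compare against a previously cached value to detect VRAM loss */
        }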
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6982baeccd14..ea25164e7f4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -40,9 +40,7 @@
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- struct amdgpu_bo *bo;
-
- bo = container_of(tbo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
amdgpu_bo_kunmap(bo);
@@ -371,6 +369,9 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
+ if (unlikely(r != 0))
+ return r;
+
bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
@@ -380,9 +381,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
else
amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
- if (unlikely(r != 0))
- return r;
-
if (kernel)
bo->tbo.priority = 1;
@@ -884,7 +882,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
amdgpu_vm_bo_invalidate(adev, abo, evict);
amdgpu_bo_kunmap(abo);
@@ -911,7 +909,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return 0;
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 39b6bf6fb051..428aae048f4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -94,6 +94,11 @@ struct amdgpu_bo {
};
};
+static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
+{
+ return container_of(tbo, struct amdgpu_bo, tbo);
+}
+
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
@@ -188,6 +193,14 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
}
}
+/**
+ * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
+ */
+static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
+{
+ return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
+}
+
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f6ce52956e6d..a59e04f3eeba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -64,10 +64,6 @@ static const struct cg_flag_name clocks[] = {
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
- if (adev->pp_enabled)
- /* TODO */
- return;
-
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
@@ -118,7 +114,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
goto fail;
}
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
} else {
mutex_lock(&adev->pm.mutex);
@@ -303,7 +299,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
- else if (adev->pp_enabled) {
+ else if (adev->powerplay.pp_funcs->dispatch_tasks &&
+ adev->powerplay.pp_funcs->get_pp_num_states) {
struct pp_states_info data;
ret = kstrtoul(buf, 0, &idx);
@@ -531,7 +528,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
if (adev->powerplay.pp_funcs->set_sclk_od)
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
} else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
@@ -575,7 +572,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
if (adev->powerplay.pp_funcs->set_mclk_od)
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
} else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
@@ -959,9 +956,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
- if (adev->pp_enabled)
- return effective_mode;
-
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
@@ -1317,6 +1311,9 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.sysfs_initialized)
return 0;
+ if (adev->pm.dpm_enabled == 0)
+ return 0;
+
if (adev->powerplay.pp_funcs->get_temperature == NULL)
return 0;
@@ -1341,27 +1338,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
- if (adev->pp_enabled) {
- ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
- if (ret) {
- DRM_ERROR("failed to create device file pp_num_states\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_cur_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_force_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_table);
- if (ret) {
- DRM_ERROR("failed to create device file pp_table\n");
- return ret;
- }
+
+ ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_num_states\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_cur_state\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_force_state\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_table);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_table\n");
+ return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
@@ -1417,16 +1413,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
+ if (adev->pm.dpm_enabled == 0)
+ return;
+
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
device_remove_file(adev->dev, &dev_attr_power_dpm_state);
device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (adev->pp_enabled) {
- device_remove_file(adev->dev, &dev_attr_pp_num_states);
- device_remove_file(adev->dev, &dev_attr_pp_cur_state);
- device_remove_file(adev->dev, &dev_attr_pp_force_state);
- device_remove_file(adev->dev, &dev_attr_pp_table);
- }
+
+ device_remove_file(adev->dev, &dev_attr_pp_num_states);
+ device_remove_file(adev->dev, &dev_attr_pp_cur_state);
+ device_remove_file(adev->dev, &dev_attr_pp_force_state);
+ device_remove_file(adev->dev, &dev_attr_pp_table);
+
device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
@@ -1457,7 +1456,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
@@ -1592,15 +1591,15 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
- } else if (adev->pp_enabled) {
- return amdgpu_debugfs_pm_info_pp(m, adev);
- } else {
+ } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
+ } else {
+ return amdgpu_debugfs_pm_info_pp(m, adev);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 2d2f0960b025..5f5aa5fddc16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -34,24 +34,6 @@
#include "cik_dpm.h"
#include "vi_dpm.h"
-static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
-{
- struct amd_pp_init pp_init;
- struct amd_powerplay *amd_pp;
- int ret;
-
- amd_pp = &(adev->powerplay);
- pp_init.chip_family = adev->family;
- pp_init.chip_id = adev->asic_type;
- pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
- pp_init.feature_mask = amdgpu_pp_feature_mask;
- pp_init.device = amdgpu_cgs_create_device(adev);
- ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
- if (ret)
- return -EINVAL;
- return 0;
-}
-
static int amdgpu_pp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -59,7 +41,6 @@ static int amdgpu_pp_early_init(void *handle)
int ret = 0;
amd_pp = &(adev->powerplay);
- adev->pp_enabled = false;
amd_pp->pp_handle = (void *)adev;
switch (adev->asic_type) {
@@ -73,9 +54,7 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_STONEY:
case CHIP_VEGA10:
case CHIP_RAVEN:
- adev->pp_enabled = true;
- if (amdgpu_create_pp_handle(adev))
- return -EINVAL;
+ amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
amd_pp->ip_funcs = &pp_ip_funcs;
amd_pp->pp_funcs = &pp_dpm_funcs;
break;
@@ -97,9 +76,7 @@ static int amdgpu_pp_early_init(void *handle)
amd_pp->ip_funcs = &ci_dpm_ip_funcs;
amd_pp->pp_funcs = &ci_dpm_funcs;
} else {
- adev->pp_enabled = true;
- if (amdgpu_create_pp_handle(adev))
- return -EINVAL;
+ amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
amd_pp->ip_funcs = &pp_ip_funcs;
amd_pp->pp_funcs = &pp_dpm_funcs;
}
@@ -118,12 +95,9 @@ static int amdgpu_pp_early_init(void *handle)
if (adev->powerplay.ip_funcs->early_init)
ret = adev->powerplay.ip_funcs->early_init(
- adev->powerplay.pp_handle);
+ amd_pp->cgs_device ? amd_pp->cgs_device :
+ amd_pp->pp_handle);
- if (ret == PP_DPM_DISABLED) {
- adev->pm.dpm_enabled = false;
- return 0;
- }
return ret;
}
@@ -137,11 +111,6 @@ static int amdgpu_pp_late_init(void *handle)
ret = adev->powerplay.ip_funcs->late_init(
adev->powerplay.pp_handle);
- if (adev->pp_enabled && adev->pm.dpm_enabled) {
- amdgpu_pm_sysfs_init(adev);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
- }
-
return ret;
}
@@ -176,21 +145,13 @@ static int amdgpu_pp_hw_init(void *handle)
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_init_bo(adev);
if (adev->powerplay.ip_funcs->hw_init)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
- if (ret == PP_DPM_DISABLED) {
- adev->pm.dpm_enabled = false;
- return 0;
- }
-
- if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
- adev->pm.dpm_enabled = true;
-
return ret;
}
@@ -199,14 +160,11 @@ static int amdgpu_pp_hw_fini(void *handle)
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pp_enabled && adev->pm.dpm_enabled)
- amdgpu_pm_sysfs_fini(adev);
-
if (adev->powerplay.ip_funcs->hw_fini)
ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle);
- if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_fini_bo(adev);
return ret;
@@ -220,9 +178,8 @@ static void amdgpu_pp_late_fini(void *handle)
adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle);
-
- if (adev->pp_enabled)
- amd_powerplay_destroy(adev->powerplay.pp_handle);
+ if (adev->powerplay.cgs_device)
+ amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
}
static int amdgpu_pp_suspend(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index befc09b68543..190e28cb827e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int user_ring,
+ int user_ring, bool lru_pipe_order,
struct amdgpu_ring **out_ring)
{
int r, i, j;
@@ -139,7 +139,7 @@ static int amdgpu_lru_map(struct amdgpu_device *adev,
}
r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
- j, out_ring);
+ j, lru_pipe_order, out_ring);
if (r)
return r;
@@ -284,8 +284,10 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
r = amdgpu_identity_map(adev, mapper, ring, out_ring);
break;
case AMDGPU_HW_IP_DMA:
+ r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
+ break;
case AMDGPU_HW_IP_COMPUTE:
- r = amdgpu_lru_map(adev, mapper, ring, out_ring);
+ r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
break;
default:
*out_ring = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5ce65280b396..a98fbbb4739f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -136,7 +136,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
if (ring->funcs->end_use)
ring->funcs->end_use(ring);
- amdgpu_ring_lru_touch(ring->adev, ring);
+ if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
+ amdgpu_ring_lru_touch(ring->adev, ring);
}
/**
@@ -155,6 +156,75 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
/**
+ * amdgpu_ring_priority_put - restore a ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Release a request for executing at @priority
+ */
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+ int i;
+
+ if (!ring->funcs->set_priority)
+ return;
+
+ if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
+ return;
+
+ /* no need to restore if the job is already at the lowest priority */
+ if (priority == AMD_SCHED_PRIORITY_NORMAL)
+ return;
+
+ mutex_lock(&ring->priority_mutex);
+ /* something higher prio is executing, no need to decay */
+ if (ring->priority > priority)
+ goto out_unlock;
+
+ /* decay priority to the next level with a job available */
+ for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+ if (i == AMD_SCHED_PRIORITY_NORMAL
+ || atomic_read(&ring->num_jobs[i])) {
+ ring->priority = i;
+ ring->funcs->set_priority(ring, i);
+ break;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&ring->priority_mutex);
+}
+
+/**
+ * amdgpu_ring_priority_get - change the ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Request a ring's priority to be raised to @priority (refcounted).
+ */
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+ if (!ring->funcs->set_priority)
+ return;
+
+ atomic_inc(&ring->num_jobs[priority]);
+
+ mutex_lock(&ring->priority_mutex);
+ if (priority <= ring->priority)
+ goto out_unlock;
+
+ ring->priority = priority;
+ ring->funcs->set_priority(ring, priority);
+
+out_unlock:
+ mutex_unlock(&ring->priority_mutex);
+}
+
+/**
* amdgpu_ring_init - init driver ring struct.
*
* @adev: amdgpu_device pointer
@@ -169,7 +239,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned max_dw, struct amdgpu_irq_src *irq_src,
unsigned irq_type)
{
- int r;
+ int r, i;
int sched_hw_submission = amdgpu_sched_hw_submission;
/* Set the hw submission limit higher for KIQ because
@@ -247,9 +317,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
+ ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+ mutex_init(&ring->priority_mutex);
INIT_LIST_HEAD(&ring->lru_list);
amdgpu_ring_lru_touch(adev, ring);
+ for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+ atomic_set(&ring->num_jobs[i], 0);
+
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
@@ -315,14 +390,16 @@ static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
* @type: amdgpu_ring_type enum
* @blacklist: blacklisted ring ids array
* @num_blacklist: number of entries in @blacklist
+ * @lru_pipe_order: find a ring from the least recently used pipe
* @ring: output ring
*
* Retrieve the amdgpu_ring structure for the least recently used ring of
* a specific IP block (all asics).
* Returns 0 on success, error on failure.
*/
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
- int num_blacklist, struct amdgpu_ring **ring)
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+ int *blacklist, int num_blacklist,
+ bool lru_pipe_order, struct amdgpu_ring **ring)
{
struct amdgpu_ring *entry;
@@ -337,10 +414,23 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
continue;
- *ring = entry;
- amdgpu_ring_lru_touch_locked(adev, *ring);
- break;
+ if (!*ring) {
+ *ring = entry;
+
+ /* We are done for ring LRU */
+ if (!lru_pipe_order)
+ break;
+ }
+
+ /* Move all rings on the same pipe to the end of the list */
+ if (entry->pipe == (*ring)->pipe)
+ amdgpu_ring_lru_touch_locked(adev, entry);
}
+
+ /* Move the ring we found to the end of the list */
+ if (*ring)
+ amdgpu_ring_lru_touch_locked(adev, *ring);
+
spin_unlock(&adev->ring_lru_list_lock);
if (!*ring) {
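Editorial sketch (not part of the patch): the priority requests are reference counted, so every get must be paired with a put, as wired into amdgpu_job.c earlier in this patch; rings without a set_priority callback silently ignore both calls.

        /* on pushing a job to the scheduler */
        amdgpu_ring_priority_get(job->ring, priority);

        /* ... job executes ... */

        /* when the scheduler frees the job */
        amdgpu_ring_priority_put(job->ring, priority);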
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 322d25299a00..b18c2b96691f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -24,6 +24,7 @@
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__
+#include <drm/amdgpu_drm.h>
#include "gpu_scheduler.h"
/* max number of rings */
@@ -56,6 +57,7 @@ struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
+struct amdgpu_job;
/*
* Fences.
@@ -88,8 +90,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+ uint32_t wait_seq,
+ signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
@@ -147,6 +153,9 @@ struct amdgpu_ring_funcs {
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+ /* priority functions */
+ void (*set_priority) (struct amdgpu_ring *ring,
+ enum amd_sched_priority priority);
};
struct amdgpu_ring {
@@ -187,6 +196,12 @@ struct amdgpu_ring {
volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng;
bool has_compute_vm_bug;
+
+ atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
+ struct mutex priority_mutex;
+ /* protected by priority_mutex */
+ int priority;
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
#endif
@@ -197,12 +212,17 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority);
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
- int num_blacklist, struct amdgpu_ring **ring);
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+ int *blacklist, int num_blacklist,
+ bool lru_pipe_order, struct amdgpu_ring **ring);
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 000000000000..290cc3f9c433
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
+ */
+
+#include <linux/fdtable.h>
+#include <linux/pid.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+#include "amdgpu_vm.h"
+
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+{
+ switch (amdgpu_priority) {
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return AMD_SCHED_PRIORITY_HIGH_HW;
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ return AMD_SCHED_PRIORITY_HIGH_SW;
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ return AMD_SCHED_PRIORITY_NORMAL;
+ case AMDGPU_CTX_PRIORITY_LOW:
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ return AMD_SCHED_PRIORITY_LOW;
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ return AMD_SCHED_PRIORITY_UNSET;
+ default:
+ WARN(1, "Invalid context priority %d\n", amdgpu_priority);
+ return AMD_SCHED_PRIORITY_INVALID;
+ }
+}
+
+static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ int fd,
+ enum amd_sched_priority priority)
+{
+ struct file *filp = fcheck(fd);
+ struct drm_file *file;
+ struct pid *pid;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx *ctx;
+ uint32_t id;
+
+ if (!filp)
+ return -EINVAL;
+
+ pid = get_pid(((struct drm_file *)filp->private_data)->pid);
+
+ mutex_lock(&adev->ddev->filelist_mutex);
+ list_for_each_entry(file, &adev->ddev->filelist, lhead) {
+ if (file->pid != pid)
+ continue;
+
+ fpriv = file->driver_priv;
+ idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+ amdgpu_ctx_priority_override(ctx, priority);
+ }
+ mutex_unlock(&adev->ddev->filelist_mutex);
+
+ put_pid(pid);
+
+ return 0;
+}
+
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_sched *args = data;
+ struct amdgpu_device *adev = dev->dev_private;
+ enum amd_sched_priority priority;
+ int r;
+
+ priority = amdgpu_to_sched_priority(args->in.priority);
+ if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
+ r = amdgpu_sched_process_priority_override(adev,
+ args->in.fd,
+ priority);
+ break;
+ default:
+ DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
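Editorial sketch (not part of the patch) of driving the new ioctl from a DRM-master process; the union drm_amdgpu_sched field names follow what amdgpu_sched_ioctl() reads above, and target_drm_fd/master_fd are placeholders.

        union drm_amdgpu_sched args = {0};

        args.in.op = AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE;
        args.in.fd = target_drm_fd;       /* DRM fd owned by the target process */
        args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

        drmCommandWrite(master_fd, DRM_AMDGPU_SCHED, &args, sizeof(args));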
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
new file mode 100644
index 000000000000..b28c067d3822
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
+ */
+
+#ifndef __AMDGPU_SCHED_H__
+#define __AMDGPU_SCHED_H__
+
+#include <drm/drmP.h>
+
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#endif // __AMDGPU_SCHED_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c586f44312f9..a4bf21f8f1c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -169,14 +169,14 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
- * @shared: true if we should only sync to the exclusive fence
+ * @explicit_sync: true if we should only sync to the exclusive fence
*
* Sync to the fence
*/
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
- void *owner)
+ void *owner, bool explicit_sync)
{
struct reservation_object_list *flist;
struct dma_fence *f;
@@ -191,6 +191,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
f = reservation_object_get_excl(resv);
r = amdgpu_sync_fence(adev, sync, f);
+ if (explicit_sync)
+ return r;
+
flist = reservation_object_get_list(resv);
if (!flist || r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index dc7687993317..70d7e3a279a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -45,7 +45,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
- void *owner);
+ void *owner,
+ bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
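Editorial sketch (not part of the patch): a typical caller would derive the new explicit_sync argument from the BO being synced against, using the amdgpu_bo_explicit_sync() helper added to amdgpu_object.h above; the job and owner names are illustrative.

        r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
                             AMDGPU_FENCE_OWNER_UNDEFINED,
                             amdgpu_bo_explicit_sync(bo));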
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 15a28578d458..1f036af85ba6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -44,6 +44,7 @@
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h"
+#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"
@@ -209,7 +210,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1;
return;
}
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (adev->mman.buffer_funcs &&
@@ -257,7 +258,7 @@ gtt:
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
@@ -289,97 +290,177 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
return addr;
}
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+/**
+ * amdgpu_find_mm_node - Helper function that finds the drm_mm_node
+ * corresponding to @offset. It also adjusts @offset to be an offset
+ * within the returned drm_mm_node.
+ */
+static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+ unsigned long *offset)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct drm_mm_node *mm_node = mem->mm_node;
- struct drm_mm_node *old_mm, *new_mm;
- uint64_t old_start, old_size, new_start, new_size;
- unsigned long num_pages;
- struct dma_fence *fence = NULL;
- int r;
+ while (*offset >= (mm_node->size << PAGE_SHIFT)) {
+ *offset -= (mm_node->size << PAGE_SHIFT);
+ ++mm_node;
+ }
+ return mm_node;
+}
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+/**
+ * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
+ *
+ * The function copies @size bytes from {src->mem + src->offset} to
+ * {dst->mem + dst->offset}. src->bo and dst->bo may be the same BO for
+ * a move, or different BOs for a BO-to-BO copy.
+ *
+ * @f: Returns the last fence if multiple jobs are submitted.
+ */
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ struct amdgpu_copy_mem *src,
+ struct amdgpu_copy_mem *dst,
+ uint64_t size,
+ struct reservation_object *resv,
+ struct dma_fence **f)
+{
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct drm_mm_node *src_mm, *dst_mm;
+ uint64_t src_node_start, dst_node_start, src_node_size,
+ dst_node_size, src_page_offset, dst_page_offset;
+ struct dma_fence *fence = NULL;
+ int r = 0;
+ const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE);
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- old_mm = old_mem->mm_node;
- old_size = old_mm->size;
- old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
+ src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
+ src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
+ src->offset;
+ src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
+ src_page_offset = src_node_start & (PAGE_SIZE - 1);
- new_mm = new_mem->mm_node;
- new_size = new_mm->size;
- new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
+ dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
+ dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
+ dst->offset;
+ dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
+ dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
- num_pages = new_mem->num_pages;
mutex_lock(&adev->mman.gtt_window_lock);
- while (num_pages) {
- unsigned long cur_pages = min(min(old_size, new_size),
- (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
- uint64_t from = old_start, to = new_start;
+
+ while (size) {
+ unsigned long cur_size;
+ uint64_t from = src_node_start, to = dst_node_start;
struct dma_fence *next;
- if (old_mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(old_mem)) {
- r = amdgpu_map_buffer(bo, old_mem, cur_pages,
- old_start, 0, ring, &from);
+ /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
+ * begins at an offset, then adjust the size accordingly
+ */
+ cur_size = min3(min(src_node_size, dst_node_size), size,
+ GTT_MAX_BYTES);
+ if (cur_size + src_page_offset > GTT_MAX_BYTES ||
+ cur_size + dst_page_offset > GTT_MAX_BYTES)
+ cur_size -= max(src_page_offset, dst_page_offset);
+
+ /* Map only what needs to be accessed. Map src to window 0 and
+ * dst to window 1
+ */
+ if (src->mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+ r = amdgpu_map_buffer(src->bo, src->mem,
+ PFN_UP(cur_size + src_page_offset),
+ src_node_start, 0, ring,
+ &from);
if (r)
goto error;
+ /* Adjust the offset because amdgpu_map_buffer returns
+ * start of mapped page
+ */
+ from += src_page_offset;
}
- if (new_mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(new_mem)) {
- r = amdgpu_map_buffer(bo, new_mem, cur_pages,
- new_start, 1, ring, &to);
+ if (dst->mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+ r = amdgpu_map_buffer(dst->bo, dst->mem,
+ PFN_UP(cur_size + dst_page_offset),
+ dst_node_start, 1, ring,
+ &to);
if (r)
goto error;
+ to += dst_page_offset;
}
- r = amdgpu_copy_buffer(ring, from, to,
- cur_pages * PAGE_SIZE,
- bo->resv, &next, false, true);
+ r = amdgpu_copy_buffer(ring, from, to, cur_size,
+ resv, &next, false, true);
if (r)
goto error;
dma_fence_put(fence);
fence = next;
- num_pages -= cur_pages;
- if (!num_pages)
+ size -= cur_size;
+ if (!size)
break;
- old_size -= cur_pages;
- if (!old_size) {
- old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
- old_size = old_mm->size;
+ src_node_size -= cur_size;
+ if (!src_node_size) {
+ src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
+ src->mem);
+ src_node_size = (src_mm->size << PAGE_SHIFT);
} else {
- old_start += cur_pages * PAGE_SIZE;
+ src_node_start += cur_size;
+ src_page_offset = src_node_start & (PAGE_SIZE - 1);
}
-
- new_size -= cur_pages;
- if (!new_size) {
- new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
- new_size = new_mm->size;
+ dst_node_size -= cur_size;
+ if (!dst_node_size) {
+ dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
+ dst->mem);
+ dst_node_size = (dst_mm->size << PAGE_SHIFT);
} else {
- new_start += cur_pages * PAGE_SIZE;
+ dst_node_start += cur_size;
+ dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
}
}
+error:
mutex_unlock(&adev->mman.gtt_window_lock);
+ if (f)
+ *f = dma_fence_get(fence);
+ dma_fence_put(fence);
+ return r;
+}
+
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_copy_mem src, dst;
+ struct dma_fence *fence = NULL;
+ int r;
+
+ src.bo = bo;
+ dst.bo = bo;
+ src.mem = old_mem;
+ dst.mem = new_mem;
+ src.offset = 0;
+ dst.offset = 0;
+
+ r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
+ new_mem->num_pages << PAGE_SHIFT,
+ bo->resv, &fence);
+ if (r)
+ goto error;
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
dma_fence_put(fence);
return r;
error:
- mutex_unlock(&adev->mman.gtt_window_lock);
-
if (fence)
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -484,7 +565,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
int r;
/* Can't move a pinned BO */
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
@@ -582,13 +663,12 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
- struct drm_mm_node *mm = bo->mem.mm_node;
- uint64_t size = mm->size;
- uint64_t offset = page_offset;
+ struct drm_mm_node *mm;
+ unsigned long offset = (page_offset << PAGE_SHIFT);
- page_offset = do_div(offset, size);
- mm += offset;
- return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
+ mm = amdgpu_find_mm_node(&bo->mem, &offset);
+ return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
+ (offset >> PAGE_SHIFT);
}
/*
@@ -829,7 +909,8 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
- placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
+ placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
+ TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r))
@@ -1112,9 +1193,6 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
- return ttm_bo_eviction_valuable(bo, place);
-
switch (bo->mem.mem_type) {
case TTM_PL_TT:
return true;
@@ -1129,7 +1207,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
num_pages -= node->size;
++node;
}
- break;
+ return false;
default:
break;
@@ -1142,9 +1220,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
unsigned long offset,
void *buf, int len, int write)
{
- struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
- struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
+ struct drm_mm_node *nodes;
uint32_t value = 0;
int ret = 0;
uint64_t pos;
@@ -1153,10 +1231,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO;
- while (offset >= (nodes->size << PAGE_SHIFT)) {
- offset -= nodes->size << PAGE_SHIFT;
- ++nodes;
- }
+ nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
pos = (nodes->start << PAGE_SHIFT) + offset;
while (len && pos < adev->mc.mc_vram_size) {
@@ -1255,6 +1330,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+	/*
+	 * The reserved VRAM for firmware must be pinned at the specified
+	 * place in VRAM, so reserve it early.
+	 */
+ r = amdgpu_fw_reserve_vram_init(adev);
+ if (r) {
+ return r;
+ }
+
r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory,
@@ -1479,7 +1563,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
job->vm_needs_flush = vm_needs_flush;
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -1571,7 +1656,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 7abae6867339..abd4084982a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -58,6 +58,12 @@ struct amdgpu_mman {
struct amd_sched_entity entity;
};
+struct amdgpu_copy_mem {
+ struct ttm_buffer_object *bo;
+ struct ttm_mem_reg *mem;
+ unsigned long offset;
+};
+
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
@@ -72,6 +78,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct reservation_object *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush);
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ struct amdgpu_copy_mem *src,
+ struct amdgpu_copy_mem *dst,
+ uint64_t size,
+ struct reservation_object *resv,
+ struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint64_t src_data,
struct reservation_object *resv,
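Editorial sketch (not part of the patch) of a BO-to-BO copy with the new helper; offsets are byte offsets into each BO's backing storage, and the BO names are placeholders.

        struct amdgpu_copy_mem src = {
                .bo     = &src_bo->tbo,
                .mem    = &src_bo->tbo.mem,
                .offset = src_offset,
        };
        struct amdgpu_copy_mem dst = {
                .bo     = &dst_bo->tbo,
                .mem    = &dst_bo->tbo.mem,
                .offset = dst_offset,
        };
        struct dma_fence *fence = NULL;
        int r;

        r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size,
                                       src_bo->tbo.resv, &fence);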
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b46280c1279f..2918de2f39ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -648,7 +648,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
- int i, r, idx = 0;
+ int i, r = 0, idx = 0;
p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
index 45ac91861965..7f7097931c6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
@@ -25,30 +25,26 @@
#include "amdgpu_vf_error.h"
#include "mxgpu_ai.h"
-#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
-
-/* struct error_entry - amdgpu VF error information. */
-struct amdgpu_vf_error_buffer {
- int read_count;
- int write_count;
- uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
- uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
- uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
-};
-
-struct amdgpu_vf_error_buffer admgpu_vf_errors;
-
-
-void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data)
+void amdgpu_vf_error_put(struct amdgpu_device *adev,
+ uint16_t sub_error_code,
+ uint16_t error_flags,
+ uint64_t error_data)
{
int index;
- uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+ uint16_t error_code;
- index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
- admgpu_vf_errors.code [index] = error_code;
- admgpu_vf_errors.flags [index] = error_flags;
- admgpu_vf_errors.data [index] = error_data;
- admgpu_vf_errors.write_count ++;
+ if (!amdgpu_sriov_vf(adev))
+ return;
+
+ error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+
+ mutex_lock(&adev->virt.vf_errors.lock);
+ index = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ adev->virt.vf_errors.code [index] = error_code;
+ adev->virt.vf_errors.flags [index] = error_flags;
+ adev->virt.vf_errors.data [index] = error_data;
+ adev->virt.vf_errors.write_count ++;
+ mutex_unlock(&adev->virt.vf_errors.lock);
}
@@ -58,7 +54,8 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
u32 data1, data2, data3;
int index;
- if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) || (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
+ if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) ||
+ (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
return;
}
/*
@@ -68,18 +65,22 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
return;
}
*/
+
+ mutex_lock(&adev->virt.vf_errors.lock);
/* The errors are overlay of array, correct read_count as full. */
- if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
- admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
+ if (adev->virt.vf_errors.write_count - adev->virt.vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
+ adev->virt.vf_errors.read_count = adev->virt.vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
}
- while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) {
- index =admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
- data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX (admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]);
- data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF;
- data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF;
+ while (adev->virt.vf_errors.read_count < adev->virt.vf_errors.write_count) {
+ index =adev->virt.vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(adev->virt.vf_errors.code[index],
+ adev->virt.vf_errors.flags[index]);
+ data2 = adev->virt.vf_errors.data[index] & 0xFFFFFFFF;
+ data3 = (adev->virt.vf_errors.data[index] >> 32) & 0xFFFFFFFF;
adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
- admgpu_vf_errors.read_count ++;
+ adev->virt.vf_errors.read_count ++;
}
+ mutex_unlock(&adev->virt.vf_errors.lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
index 2a3278ec76ba..6436bd053325 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
@@ -56,7 +56,10 @@ enum AMDGIM_ERROR_CATEGORY {
AMDGIM_ERROR_CATEGORY_MAX
};
-void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data);
+void amdgpu_vf_error_put(struct amdgpu_device *adev,
+ uint16_t sub_error_code,
+ uint16_t error_flags,
+ uint64_t error_data);
void amdgpu_vf_error_trans_all (struct amdgpu_device *adev);
#endif /* __VF_ERROR_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ab05121b9272..6738df836a70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,7 +22,7 @@
*/
#include "amdgpu.h"
-#define MAX_KIQ_REG_WAIT 100000
+#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
@@ -114,27 +114,25 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r;
- uint32_t val;
- struct dma_fence *f;
+ unsigned long flags;
+ uint32_t val, seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
- mutex_lock(&kiq->ring_mutex);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit(ring, &f);
+ amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
- mutex_unlock(&kiq->ring_mutex);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
- r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
- dma_fence_put(f);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ DRM_ERROR("wait for kiq fence error: %ld\n", r);
return ~0;
}
-
val = adev->wb.wb[adev->virt.reg_val_offs];
return val;
@@ -143,23 +141,23 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
signed long r;
- struct dma_fence *f;
+ unsigned long flags;
+ uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_wreg);
- mutex_lock(&kiq->ring_mutex);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit(ring, &f);
+ amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
- mutex_unlock(&kiq->ring_mutex);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
- r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1)
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- dma_fence_put(f);
+ DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
/**
@@ -274,3 +272,80 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
(void *)&adev->virt.mm_table.cpu_addr);
adev->virt.mm_table.gpu_addr = 0;
}
+
+
+int amdgpu_virt_fw_reserve_get_checksum(void *obj,
+ unsigned long obj_size,
+ unsigned int key,
+ unsigned int chksum)
+{
+ unsigned int ret = key;
+ unsigned long i = 0;
+ unsigned char *pos;
+
+ pos = (char *)obj;
+ /* calculate checksum */
+ for (i = 0; i < obj_size; ++i)
+ ret += *(pos + i);
+ /* subtract the checksum field's own bytes */
+ pos = (char *)&chksum;
+ for (i = 0; i < sizeof(chksum); ++i)
+ ret -= *(pos + i);
+ return ret;
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
+ uint32_t pf2vf_ver = 0;
+ uint32_t pf2vf_size = 0;
+ uint32_t checksum = 0;
+ uint32_t checkval;
+ char *str;
+
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ adev->virt.fw_reserve.p_vf2pf = NULL;
+
+ if (adev->fw_vram_usage.va != NULL) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amdgim_pf2vf_info_header *)(
+ adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
+ pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
+ AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
+ AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
+
+ /* the pf2vf message must fit within 4K */
+ if (pf2vf_size > 0 && pf2vf_size < 4096) {
+ checkval = amdgpu_virt_fw_reserve_get_checksum(
+ adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
+ adev->virt.fw_reserve.checksum_key, checksum);
+ if (checkval == checksum) {
+ adev->virt.fw_reserve.p_vf2pf =
+ ((void *)adev->virt.fw_reserve.p_pf2vf +
+ pf2vf_size);
+ memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
+ sizeof(amdgim_vf2pf_info));
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
+ AMDGPU_FW_VRAM_VF2PF_VER);
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
+ sizeof(amdgim_vf2pf_info));
+ AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
+ &str);
+#ifdef MODULE
+ if (THIS_MODULE->version != NULL)
+ strcpy(str, THIS_MODULE->version);
+ else
+#endif
+ strcpy(str, "N/A");
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
+ 0);
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
+ amdgpu_virt_fw_reserve_get_checksum(
+ adev->virt.fw_reserve.p_vf2pf,
+ pf2vf_size,
+ adev->virt.fw_reserve.checksum_key, 0));
+ }
+ }
+ }
+}
+
+
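The exchange block is validated with a plain byte sum seeded by the key received over mailbox 2; because the checksum field is itself part of the summed region, its bytes are subtracted back out, so the stored value never influences the sum it is compared against. A small self-contained sketch of the scheme, assuming an ordinary local buffer rather than the VRAM-mapped structure:

#include <assert.h>

/* same byte-sum scheme as amdgpu_virt_fw_reserve_get_checksum() */
static unsigned int byte_sum_checksum(const void *obj, unsigned long size,
				      unsigned int key, unsigned int stored)
{
	const unsigned char *pos = obj;
	unsigned int ret = key;
	unsigned long i;

	for (i = 0; i < size; ++i)
		ret += pos[i];
	/* cancel the checksum field's own contribution */
	pos = (const unsigned char *)&stored;
	for (i = 0; i < sizeof(stored); ++i)
		ret -= pos[i];
	return ret;
}

int main(void)
{
	struct { unsigned int checksum; unsigned char payload[8]; } msg =
		{ 0, { 1, 2, 3, 4, 5, 6, 7, 8 } };
	unsigned int key = 0x1234;	/* stands in for checksum_key from mailbox 2 */

	/* sender: sum with the checksum field still zero */
	msg.checksum = byte_sum_checksum(&msg, sizeof(msg), key, 0);

	/* receiver: recompute over the same bytes and compare */
	assert(byte_sum_checksum(&msg, sizeof(msg), key, msg.checksum) ==
	       msg.checksum);
	return 0;
}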
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index afcfb8bcfb65..b89d37fc406f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -36,6 +36,18 @@ struct amdgpu_mm_table {
uint64_t gpu_addr;
};
+#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
+
+/* struct amdgpu_vf_error_buffer - amdgpu VF error information. */
+struct amdgpu_vf_error_buffer {
+ struct mutex lock;
+ int read_count;
+ int write_count;
+ uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+};
+
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
@@ -46,6 +58,179 @@ struct amdgpu_virt_ops {
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
};
+/*
+ * Firmware Reserve Frame buffer
+ */
+struct amdgpu_virt_fw_reserve {
+ struct amdgim_pf2vf_info_header *p_pf2vf;
+ struct amdgim_vf2pf_info_header *p_vf2pf;
+ unsigned int checksum_key;
+};
+/*
+ * Definition of the data exchanged between PF and VF.
+ * Structures are force-aligned to 4 bytes to keep the same layout as the PF.
+ */
+#define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024)
+
+#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
+ (total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
+
+enum AMDGIM_FEATURE_FLAG {
+ /* GIM supports feature of Error log collecting */
+ AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
+ /* GIM supports feature of loading uCodes */
+ AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
+};
+
+struct amdgim_pf2vf_info_header {
+ /* the total structure size in bytes. */
+ uint32_t size;
+ /* version of this structure, written by the GIM */
+ uint32_t version;
+} __aligned(4);
+struct amdgim_pf2vf_info_v1 {
+ /* header contains size and version */
+ struct amdgim_pf2vf_info_header header;
+ /* max_width * max_height */
+ unsigned int uvd_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ unsigned int uvd_enc_max_bandwidth;
+ /* max_width * max_height */
+ unsigned int vce_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ unsigned int vce_enc_max_bandwidth;
+ /* MEC FW position in kb from the start of visible frame buffer */
+ unsigned int mecfw_kboffset;
+ /* The features flags of the GIM driver supports. */
+ unsigned int feature_flags;
+ /* use the private key from mailbox 2 to create the checksum */
+ unsigned int checksum;
+} __aligned(4);
+
+struct amdgim_pf2vf_info_v2 {
+ /* header contains size and version */
+ struct amdgim_pf2vf_info_header header;
+ /* use the private key from mailbox 2 to create the checksum */
+ uint32_t checksum;
+ /* The features flags of the GIM driver supports. */
+ uint32_t feature_flags;
+ /* max_width * max_height */
+ uint32_t uvd_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ uint32_t uvd_enc_max_bandwidth;
+ /* max_width * max_height */
+ uint32_t vce_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ uint32_t vce_enc_max_bandwidth;
+ /* MEC FW position in kb from the start of VF visible frame buffer */
+ uint64_t mecfw_kboffset;
+ /* MEC FW size in KB */
+ uint32_t mecfw_ksize;
+ /* UVD FW position in kb from the start of VF visible frame buffer */
+ uint64_t uvdfw_kboffset;
+ /* UVD FW size in KB */
+ uint32_t uvdfw_ksize;
+ /* VCE FW position in kb from the start of VF visible frame buffer */
+ uint64_t vcefw_kboffset;
+ /* VCE FW size in KB */
+ uint32_t vcefw_ksize;
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+} __aligned(4);
+
+
+struct amdgim_vf2pf_info_header {
+ /* the total structure size in bytes. */
+ uint32_t size;
+ /* version of this structure, written by the guest */
+ uint32_t version;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v1 {
+ /* header contains size and version */
+ struct amdgim_vf2pf_info_header header;
+ /* driver version */
+ char driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ unsigned int driver_cert;
+ /* guest OS type and version: need a define */
+ unsigned int os_info;
+ /* in the unit of 1M */
+ unsigned int fb_usage;
+ /* guest gfx engine usage percentage */
+ unsigned int gfx_usage;
+ /* guest gfx engine health percentage */
+ unsigned int gfx_health;
+ /* guest compute engine usage percentage */
+ unsigned int compute_usage;
+ /* guest compute engine health percentage */
+ unsigned int compute_health;
+ /* guest vce engine usage percentage. 0xffff means N/A. */
+ unsigned int vce_enc_usage;
+ /* guest vce engine health percentage. 0xffff means N/A. */
+ unsigned int vce_enc_health;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ unsigned int uvd_enc_usage;
+ /* guest uvd engine health percentage. 0xffff means N/A. */
+ unsigned int uvd_enc_health;
+ unsigned int checksum;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v2 {
+ /* header contains size and version */
+ struct amdgim_vf2pf_info_header header;
+ uint32_t checksum;
+ /* driver version */
+ uint8_t driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ uint32_t driver_cert;
+ /* guest OS type and version: need a define */
+ uint32_t os_info;
+ /* in the unit of 1M */
+ uint32_t fb_usage;
+ /* guest gfx engine usage percentage */
+ uint32_t gfx_usage;
+ /* guest gfx engine health percentage */
+ uint32_t gfx_health;
+ /* guest compute engine usage percentage */
+ uint32_t compute_usage;
+ /* guest compute engine health percentage */
+ uint32_t compute_health;
+ /* guest vce engine usage percentage. 0xffff means N/A. */
+ uint32_t vce_enc_usage;
+ /* guest vce engine health percentage. 0xffff means N/A. */
+ uint32_t vce_enc_health;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ uint32_t uvd_enc_usage;
+ /* guest uvd engine health percentage. 0xffff means N/A. */
+ uint32_t uvd_enc_health;
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+} __aligned(4);
+
+#define AMDGPU_FW_VRAM_VF2PF_VER 2
+typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info;
+
+#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
+ do { \
+ ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
+ } while (0)
+
+#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
+ do { \
+ (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
+ } while (0)
+
+#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
+ do { \
+ if (!adev->virt.fw_reserve.p_pf2vf) \
+ *(val) = 0; \
+ else { \
+ if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
+ *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
+ if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
+ *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
+ } \
+ } while (0)
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -59,6 +244,8 @@ struct amdgpu_virt {
struct work_struct flr_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
+ struct amdgpu_vf_error_buffer vf_errors;
+ struct amdgpu_virt_fw_reserve fw_reserve;
};
#define AMDGPU_CSA_SIZE (8 * 1024)
@@ -101,5 +288,9 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
+int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
+ unsigned int key,
+ unsigned int chksum);
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
#endif
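Reading the AMDGIM_GET_STRUCTURE_RESERVED_SIZE() macro with the v2 structures plugged in shows that each exchange structure is padded out to a fixed 256 dwords (1 KB); the arguments count members by element size and the result is the number of spare uint32_t slots (numbers below are derived from the member lists above):

  amdgim_pf2vf_info_v2: reserved = 256 - (0 + 0 + (9 + 2) + 3*2)           = 239 dwords
                        2 (header) + 9 u32 + 6 (three u64) + 239 reserved  = 256 dwords
  amdgim_vf2pf_info_v2: reserved = 256 - ((64 + 3)/4 + 0 + (12 + 2) + 0)   = 226 dwords
                        2 (header) + 12 u32 + 16 (driver_version) + 226    = 256 dwords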
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bbcc67038203..c8c26f21993c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -328,9 +328,10 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_SHADOW);
if (vm->pte_support_ats) {
- init_value = AMDGPU_PTE_SYSTEM;
+ init_value = AMDGPU_PTE_DEFAULT_ATC;
if (level != adev->vm_manager.num_level - 1)
init_value |= AMDGPU_PDE_PTE;
+
}
/* walk over the address space and allocate the page tables */
@@ -1034,7 +1035,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner);
+ amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
r = amdgpu_sync_wait(&sync, true);
amdgpu_sync_free(&sync);
@@ -1175,11 +1176,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync,
parent->base.bo->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ AMDGPU_FENCE_OWNER_VM, false);
if (shadow)
amdgpu_sync_resv(adev, &job->sync,
shadow->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ AMDGPU_FENCE_OWNER_VM, false);
WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
@@ -1243,7 +1244,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- int r;
+ int r = 0;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->relocated)) {
@@ -1643,7 +1644,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
goto error_free;
r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
- owner);
+ owner, false);
if (r)
goto error_free;
@@ -1698,6 +1699,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
+ unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
uint64_t pfn, start = mapping->start;
int r;
@@ -1732,6 +1734,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
do {
+ dma_addr_t *dma_addr = NULL;
uint64_t max_entries;
uint64_t addr, last;
@@ -1745,15 +1748,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
if (pages_addr) {
+ uint64_t count;
+
max_entries = min(max_entries, 16ull * 1024ull);
- addr = 0;
+ for (count = 1; count < max_entries; ++count) {
+ uint64_t idx = pfn + count;
+
+ if (pages_addr[idx] !=
+ (pages_addr[idx - 1] + PAGE_SIZE))
+ break;
+ }
+
+ if (count < min_linear_pages) {
+ addr = pfn << PAGE_SHIFT;
+ dma_addr = pages_addr;
+ } else {
+ addr = pages_addr[pfn];
+ max_entries = count;
+ }
+
} else if (flags & AMDGPU_PTE_VALID) {
addr += adev->vm_manager.vram_base_offset;
+ addr += pfn << PAGE_SHIFT;
}
- addr += pfn << PAGE_SHIFT;
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive, pages_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
start, last, flags, addr,
fence);
if (r)
@@ -2017,7 +2037,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
list_del(&mapping->list);
if (vm->pte_support_ats)
- init_pte_value = AMDGPU_PTE_SYSTEM;
+ init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
mapping->start, mapping->last,
@@ -2541,7 +2561,8 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
* @adev: amdgpu_device pointer
* @fragment_size_default: the default fragment size if it's set auto
*/
-void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
+ uint32_t fragment_size_default)
{
if (amdgpu_vm_fragment_size == -1)
adev->vm_manager.fragment_size = fragment_size_default;
@@ -2555,7 +2576,8 @@ void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_s
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
+ uint32_t fragment_size_default)
{
/* adjust vm size firstly */
if (amdgpu_vm_size == -1)
@@ -2627,7 +2649,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (adev->asic_type == CHIP_RAVEN) {
vm->pte_support_ats = true;
- init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
+ init_pde_value = AMDGPU_PTE_DEFAULT_ATC
+ | AMDGPU_PDE_PTE;
+
}
} else
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2682,6 +2706,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
INIT_KFIFO(vm->faults);
+ vm->fault_credit = 16;
return 0;
@@ -2734,8 +2759,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+ struct amdgpu_bo *root;
u64 fault;
- int i;
+ int i, r;
/* Clear pending page faults from IH when the VM is destroyed */
while (kfifo_get(&vm->faults, &fault))
@@ -2770,13 +2796,51 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- amdgpu_vm_free_levels(&vm->root);
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ r = amdgpu_bo_reserve(root, true);
+ if (r) {
+ dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
+ } else {
+ amdgpu_vm_free_levels(&vm->root);
+ amdgpu_bo_unreserve(root);
+ }
+ amdgpu_bo_unref(&root);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vm_free_reserved_vmid(adev, vm, i);
}
/**
+ * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID to identify the VM
+ *
+ * This function is expected to be called in interrupt context. Returns
+ * true if there was fault credit, false otherwise.
+ */
+bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid)
+{
+ struct amdgpu_vm *vm;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (!vm)
+ /* VM not found, can't track fault credit */
+ return true;
+
+ /* No lock needed. only accessed by IRQ handler */
+ if (!vm->fault_credit)
+ /* Too many faults in this VM */
+ return false;
+
+ vm->fault_credit--;
+ return true;
+}
+
+/**
* amdgpu_vm_manager_init - init the VM manager
*
* @adev: amdgpu_device pointer
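The split-mapping change above no longer routes every system page through the per-page DMA table: it first scans pages_addr[] for a physically contiguous run starting at the current pfn, maps the run as one linear range when it is at least a fragment (1 << fragment_size pages) long, and only falls back to per-page translation for shorter runs. A minimal sketch of that scan, with names local to the sketch:

#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096ull

/*
 * Count how many pages starting at pfn are physically contiguous, capped at
 * max_entries - the scan amdgpu_vm_bo_split_mapping() now performs before
 * choosing between a linear mapping and per-page translation.
 */
static uint64_t contiguous_run(const uint64_t *pages_addr, uint64_t pfn,
			       uint64_t max_entries)
{
	uint64_t count;

	for (count = 1; count < max_entries; ++count) {
		uint64_t idx = pfn + count;

		if (pages_addr[idx] != pages_addr[idx - 1] + SKETCH_PAGE_SIZE)
			break;
	}
	return count;
}

int main(void)
{
	/* three contiguous pages, then a gap */
	uint64_t pages[] = { 0x1000, 0x2000, 0x3000, 0x9000 };
	uint64_t min_linear_pages = 2;			/* say, fragment_size = 1 */
	uint64_t run = contiguous_run(pages, 0, 4);	/* -> 3 */

	/* run >= fragment: map linearly; otherwise hand over the DMA table */
	return (run >= min_linear_pages && run == 3) ? 0 : 1;
}

The payoff is that a long contiguous run can be written as a single page-table update instead of one entry per page.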
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 0af090667dfc..aa914256b4bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -73,6 +73,16 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
+/* For Raven */
+#define AMDGPU_MTYPE_CC 2
+
+#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
+ | AMDGPU_PTE_SNOOPED \
+ | AMDGPU_PTE_EXECUTABLE \
+ | AMDGPU_PTE_READABLE \
+ | AMDGPU_PTE_WRITEABLE \
+ | AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
+
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
@@ -165,8 +175,11 @@ struct amdgpu_vm {
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;
- /* Up to 128 pending page faults */
+ /* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
+
+ /* Limit non-retry fault storms */
+ unsigned int fault_credit;
};
struct amdgpu_vm_id {
@@ -244,6 +257,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 68ce1bdaf2fc..68b505c768ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6365,7 +6365,6 @@ static int ci_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 07d3d895da10..a870b354e3f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -237,8 +237,23 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
*/
static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
{
- /* Process all interrupts */
- return true;
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
}
/**
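Each IH ring entry occupies four dwords, hence the rptr += 16 when an entry is swallowed; source IDs 146 and 147 are the VM fault interrupts handled above, and the faulting PASID sits in the upper half of the third dword. A tiny sketch of the field extraction, using a host-endian dword array in place of the IH ring:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* one 4-dword IH ring entry: src_id 146 (VM fault), pasid 0x0042 */
	uint32_t entry[4] = { 146, 0, 0x0042u << 16, 0 };

	uint32_t src_id = entry[0] & 0xff;
	uint16_t pasid = entry[2] >> 16;

	printf("src_id=%u pasid=0x%04x\n", src_id, pasid);
	return 0;
}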
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index b6cdf4afaf46..fa61d649bb44 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -216,8 +216,23 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
*/
static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
{
- /* Process all interrupts */
- return true;
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index b9ee9073cb0d..a8829af120c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -288,7 +288,7 @@ dce_virtual_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -298,7 +298,7 @@ dce_virtual_encoder(struct drm_connector *connector)
/* pick the first one */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index dfc10b1baea0..b8002ac3e536 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -3952,10 +3953,10 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indices,
&indices_count,
- sizeof(unique_indices) / sizeof(int),
+ ARRAY_SIZE(unique_indices),
indirect_start_offsets,
&offset_count,
- sizeof(indirect_start_offsets)/sizeof(int));
+ ARRAY_SIZE(indirect_start_offsets));
/* save and restore list */
WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
@@ -3977,14 +3978,14 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
/* starting offsets starts */
WREG32(mmRLC_GPM_SCRATCH_ADDR,
adev->gfx.rlc.starting_offsets_start);
- for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+ for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(mmRLC_GPM_SCRATCH_DATA,
indirect_start_offsets[i]);
/* unique indices */
temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
data = mmRLC_SRM_INDEX_CNTL_DATA_0;
- for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
+ for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
if (unique_indices[i] != 0) {
WREG32(temp + i, unique_indices[i] & 0x3FFFF);
WREG32(data + i, unique_indices[i] >> 20);
@@ -4132,18 +4133,12 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
gfx_v8_0_rlc_reset(adev);
gfx_v8_0_init_pg(adev);
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
- /* legacy rlc firmware loading */
- r = gfx_v8_0_rlc_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_RLC_G);
- if (r)
- return -EINVAL;
- }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ /* legacy rlc firmware loading */
+ r = gfx_v8_0_rlc_load_microcode(adev);
+ if (r)
+ return r;
}
gfx_v8_0_rlc_start(adev);
@@ -4959,43 +4954,15 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
if (!(adev->flags & AMD_IS_APU))
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/* legacy firmware loading */
- r = gfx_v8_0_cp_gfx_load_microcode(adev);
- if (r)
- return r;
+ r = gfx_v8_0_cp_gfx_load_microcode(adev);
+ if (r)
+ return r;
- r = gfx_v8_0_cp_compute_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_CE);
- if (r)
- return -EINVAL;
-
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_PFP);
- if (r)
- return -EINVAL;
-
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_ME);
- if (r)
- return -EINVAL;
-
- if (adev->asic_type == CHIP_TOPAZ) {
- r = gfx_v8_0_cp_compute_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_MEC1);
- if (r)
- return -EINVAL;
- }
- }
+ r = gfx_v8_0_cp_compute_load_microcode(adev);
+ if (r)
+ return r;
}
r = gfx_v8_0_cp_gfx_resume(adev);
@@ -6018,7 +5985,6 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
{
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
- void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -6036,7 +6002,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -6057,7 +6024,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -6069,7 +6037,6 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
- void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -6087,7 +6054,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@@ -6106,7 +6074,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_3D,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -6127,7 +6096,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@@ -6142,7 +6112,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_RLC,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@@ -6156,7 +6127,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CP,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -6423,6 +6395,104 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
+static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
+ bool acquire)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int pipe_num, tmp, reg;
+ int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
+
+ pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
+
+ /* first me only has 2 entries, GFX and HP3D */
+ if (ring->me > 0)
+ pipe_num -= 2;
+
+ reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
+ tmp = RREG32(reg);
+ tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
+ WREG32(reg, tmp);
+}
+
+static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ int i, pipe;
+ bool reserve;
+ struct amdgpu_ring *iring;
+
+ mutex_lock(&adev->gfx.pipe_reserve_mutex);
+ pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
+ if (acquire)
+ set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ else
+ clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+
+ if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
+ /* Clear all reservations - everyone reacquires all resources */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
+ gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
+ true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i)
+ gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
+ true);
+ } else {
+ /* Lower all pipes without a current reservation */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ iring = &adev->gfx.gfx_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v8_0_ring_set_pipe_percent(iring, reserve);
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ iring = &adev->gfx.compute_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v8_0_ring_set_pipe_percent(iring, reserve);
+ }
+ }
+
+ mutex_unlock(&adev->gfx.pipe_reserve_mutex);
+}
+
+static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ uint32_t pipe_priority = acquire ? 0x2 : 0x0;
+ uint32_t queue_priority = acquire ? 0xf : 0x0;
+
+ mutex_lock(&adev->srbm_mutex);
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
+ WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
+
+ vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return;
+
+ gfx_v8_0_hqd_set_priority(adev, ring, acquire);
+ gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
+}
+
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
@@ -6868,6 +6938,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .set_priority = gfx_v8_0_ring_set_priority_compute,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
@@ -7076,7 +7147,7 @@ static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
uint64_t ce_payload_addr;
int cnt_ce;
- static union {
+ union {
struct vi_ce_ib_state regular;
struct vi_ce_ib_state_chained_ib chained;
} ce_payload = {};
@@ -7105,7 +7176,7 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
uint64_t de_payload_addr, gds_addr, csa_addr;
int cnt_de;
- static union {
+ union {
struct vi_de_ib_state regular;
struct vi_de_ib_state_chained_ib chained;
} de_payload = {};
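The new compute priority hook raises CP_HQD_PIPE_PRIORITY/CP_HQD_QUEUE_PRIORITY for the acquiring queue and, through the shared pipe_reserve_bitmap, drops SPI_WCL_PIPE_PERCENT to the minimum for every pipe without a reservation until the last reservation is released. A rough standalone sketch of that reserve/restore decision, with the bitmap and register writes replaced by stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define NUM_PIPES 8

static bool reserved[NUM_PIPES];	/* stands in for pipe_reserve_bitmap */

/* stand-in for gfx_v8_0_ring_set_pipe_percent(): full vs. minimal quantum */
static void set_pipe_percent(int pipe, bool full)
{
	printf("pipe %d -> %s\n", pipe, full ? "full" : "min");
}

static void pipe_reserve(int pipe, bool acquire)
{
	int i, active = 0;

	reserved[pipe] = acquire;
	for (i = 0; i < NUM_PIPES; i++)
		active += reserved[i];

	for (i = 0; i < NUM_PIPES; i++)
		/* nothing reserved: everyone runs full; else only owners do */
		set_pipe_percent(i, active == 0 || reserved[i]);
}

int main(void)
{
	pipe_reserve(3, true);	/* high-priority queue on pipe 3 acquires */
	pipe_reserve(3, false);	/* released: every pipe is restored */
	return 0;
}

The hook is wired only into gfx_v8_0_ring_funcs_compute and returns early for non-compute rings, so graphics rings are never the acquirer but are still throttled while a reservation is held.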
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index deeaee1457ef..7f15bb2c5233 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -1730,10 +1731,10 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indirect_regs,
&unique_indirect_reg_count,
- sizeof(unique_indirect_regs)/sizeof(int),
+ ARRAY_SIZE(unique_indirect_regs),
indirect_start_offsets,
&indirect_start_offsets_count,
- sizeof(indirect_start_offsets)/sizeof(int));
+ ARRAY_SIZE(indirect_start_offsets));
/* enable auto inc in case it is disabled */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -1770,12 +1771,12 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
/* write the starting offsets to RLC scratch ram */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
adev->gfx.rlc.starting_offsets_start);
- for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+ for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
indirect_start_offsets[i]);
/* load unique indirect regs*/
- for (i = 0; i < sizeof(unique_indirect_regs)/sizeof(int); i++) {
+ for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
unique_indirect_regs[i] & 0x3FFFF);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
@@ -3583,7 +3584,7 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
- struct nbio_hdp_flush_reg *nbio_hf_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg;
if (ring->adev->flags & AMD_IS_APU)
nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
@@ -3806,7 +3807,7 @@ static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
- static struct v9_ce_ib_state ce_payload = {0};
+ struct v9_ce_ib_state ce_payload = {0};
uint64_t csa_addr;
int cnt;
@@ -3825,7 +3826,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
- static struct v9_de_ib_state de_payload = {0};
+ struct v9_de_ib_state de_payload = {0};
uint64_t csa_addr, gds_addr;
int cnt;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 65ed6d3a8f05..bd592cb39f37 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -216,8 +216,23 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
*/
static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
{
- /* Process all interrupts */
- return true;
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index b57399a462c2..f33d1ffdb20b 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2969,16 +2969,10 @@ static int kv_dpm_late_init(void *handle)
{
/* powerdown unused blocks for now */
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int ret;
if (!amdgpu_dpm)
return 0;
- /* init the sysfs and debugfs files late */
- ret = amdgpu_pm_sysfs_init(adev);
- if (ret)
- return ret;
-
kv_dpm_powergate_acp(adev, true);
kv_dpm_powergate_samu(adev, true);
@@ -3040,7 +3034,6 @@ static int kv_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
kv_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 2812d88a8bdd..b4906d2f30d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -183,6 +183,12 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
return r;
}
+ /* Retrieve checksum from mailbox2 */
+ if (req == IDH_REQ_GPU_INIT_ACCESS) {
+ adev->virt.fw_reserve.checksum_key =
+ RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 045988b18bc3..904a1bab9b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -215,31 +215,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+ .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
+ .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
+ .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+ .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
+};
-int nbio_v6_1_init(struct amdgpu_device *adev)
-{
- nbio_v6_1_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
- nbio_v6_1_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK;
-
- nbio_v6_1_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
- nbio_v6_1_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
-
- return 0;
-}
+const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
+ .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
+ .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
+};
void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 686e4b4d296a..14ca8d45a46c 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,8 +26,8 @@
#include "soc15_common.h"
-extern struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-extern struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
+extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
int nbio_v6_1_init(struct amdgpu_device *adev);
u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
uint32_t idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 11b70d601922..f802b973410a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -185,28 +185,24 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
-struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
+const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
+ .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
+ .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
+ .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+ .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+};
-int nbio_v7_0_init(struct amdgpu_device *adev)
-{
- nbio_v7_0_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
- nbio_v7_0_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
-
- nbio_v7_0_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
- nbio_v7_0_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
-
- return 0;
-}
+const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
+ .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
+ .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index 054ff49427e6..df8fa90f40d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,8 +26,8 @@
#include "soc15_common.h"
-extern struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-extern struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
+extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
int nbio_v7_0_init(struct amdgpu_device *adev);
u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
uint32_t idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index dea7c909ca5f..4e20d91d5d50 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -257,6 +257,9 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
unsigned int psp_write_ptr_reg = 0;
struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
struct psp_ring *ring = &psp->km_ring;
+ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
struct amdgpu_device *adev = psp->adev;
uint32_t ring_size_dw = ring->ring_size / 4;
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
@@ -266,9 +269,16 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
/* Update KM RB frame pointer to new frame */
if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring->ring_mem;
+ write_frame = ring_buffer_start;
else
- write_frame = ring->ring_mem + (psp_write_ptr_reg / rb_frame_size_dw);
+ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+ /* Check invalid write_frame ptr address */
+ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+ DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+ DRM_ERROR("write_frame is pointing to address out of bounds\n");
+ return -EINVAL;
+ }
/* Initialize KM RB frame */
memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
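The added bounds check works in frame-sized steps: the write pointer register counts dwords, each KM ring frame is sizeof(struct psp_gfx_rb_frame) bytes, and the last valid slot is ring start + ring_size/frame_size - 1. For illustration, with a hypothetical 4 KB ring and 64-byte frames that gives ring_size_dw = 1024, rb_frame_size_dw = 16, valid write pointers 0, 16, ..., 1008, frame slots 0..63, and ring_buffer_end = ring_buffer_start + 63; any computed write_frame outside that span now fails with -EINVAL instead of being written through.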
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index cee5c396b277..c7bcfe8e286c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -367,6 +367,9 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
unsigned int psp_write_ptr_reg = 0;
struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
struct psp_ring *ring = &psp->km_ring;
+ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
struct amdgpu_device *adev = psp->adev;
uint32_t ring_size_dw = ring->ring_size / 4;
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
@@ -378,9 +381,16 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
/* write_frame ptr increments by size of rb_frame in bytes */
/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring->ring_mem;
+ write_frame = ring_buffer_start;
else
- write_frame = ring->ring_mem + (psp_write_ptr_reg / rb_frame_size_dw);
+ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+ /* Check invalid write_frame ptr address */
+ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+ DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+ DRM_ERROR("write_frame is pointing to address out of bounds\n");
+ return -EINVAL;
+ }
/* Initialize KM RB frame */
memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index acdee3a4602c..67f375bfe452 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -561,21 +561,11 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
- r = sdma_v2_4_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA0);
- if (r)
- return -EINVAL;
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA1);
- if (r)
- return -EINVAL;
- }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ r = sdma_v2_4_load_microcode(adev);
+ if (r)
+ return r;
}
/* halt the engine before programing */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 72f31cc7df00..6d06f8eb659f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -819,23 +819,12 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
*/
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
- int r, i;
+ int r;
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
- r = sdma_v3_0_load_microcode(adev);
- if (r)
- return r;
- } else {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- (i == 0) ?
- AMDGPU_UCODE_ID_SDMA0 :
- AMDGPU_UCODE_ID_SDMA1);
- if (r)
- return -EINVAL;
- }
- }
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ r = sdma_v3_0_load_microcode(adev);
+ if (r)
+ return r;
}
/* disable sdma engine before programing it */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index c26d205ff3bf..46009db3d195 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -371,7 +371,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
- struct nbio_hdp_flush_reg *nbio_hf_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg;
if (ring->adev->flags & AMD_IS_APU)
nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 9b8db6046271..51fd0c9a20a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7604,11 +7604,6 @@ static int si_dpm_late_init(void *handle)
if (!amdgpu_dpm)
return 0;
- /* init the sysfs and debugfs files late */
- ret = amdgpu_pm_sysfs_init(adev);
- if (ret)
- return ret;
-
ret = si_set_temperature_range(adev);
if (ret)
return ret;
@@ -7764,7 +7759,6 @@ static int si_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
si_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 245a18aeb389..3ca9d114f630 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -101,7 +101,7 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- struct nbio_pcie_index_data *nbio_pcie_id;
+ const struct nbio_pcie_index_data *nbio_pcie_id;
if (adev->flags & AMD_IS_APU)
nbio_pcie_id = &nbio_v7_0_pcie_index_data;
@@ -122,7 +122,7 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- struct nbio_pcie_index_data *nbio_pcie_id;
+ const struct nbio_pcie_index_data *nbio_pcie_id;
if (adev->flags & AMD_IS_APU)
nbio_pcie_id = &nbio_v7_0_pcie_index_data;
@@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_VEGA10)
- return adev->clock.spll.reference_freq/4;
- else
- return adev->clock.spll.reference_freq;
+ return adev->clock.spll.reference_freq;
}
@@ -604,21 +601,6 @@ static int soc15_common_early_init(void *handle)
(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
psp_enabled = true;
- /*
- * nbio need be used for both sdma and gfx9, but only
- * initializes once
- */
- switch(adev->asic_type) {
- case CHIP_VEGA10:
- nbio_v6_1_init(adev);
- break;
- case CHIP_RAVEN:
- nbio_v7_0_init(adev);
- break;
- default:
- return -EINVAL;
- }
-
adev->rev_id = soc15_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 5ed00692618e..aa4e320e31f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -227,8 +227,23 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
*/
static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
{
- /* Process all interrupts */
- return true;
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 31db356476f8..2581543b35a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -38,6 +38,8 @@
#include "vi.h"
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
@@ -48,6 +50,18 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
+ * uvd_v6_0_enc_support - get encode support status
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the current hardware encode support status
+ */
+static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
+{
+ return ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_POLARIS12));
+}
+
+/**
* uvd_v6_0_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
@@ -62,6 +76,22 @@ static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
}
/**
+ * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc read pointer
+ */
+static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.ring_enc[0])
+ return RREG32(mmUVD_RB_RPTR);
+ else
+ return RREG32(mmUVD_RB_RPTR2);
+}
+/**
* uvd_v6_0_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
@@ -76,6 +106,23 @@ static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
+ * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc write pointer
+ */
+static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.ring_enc[0])
+ return RREG32(mmUVD_RB_WPTR);
+ else
+ return RREG32(mmUVD_RB_WPTR2);
+}
+
+/**
* uvd_v6_0_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
@@ -89,11 +136,248 @@ static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
+/**
+ * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.ring_enc[0])
+ WREG32(mmUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ else
+ WREG32(mmUVD_RB_WPTR2,
+ lower_32_bits(ring->wptr));
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
+ *
+ * @ring: the engine to test on
+ *
+ * Submit an END command and check that the ring read pointer advances.
+ *
+ */
+static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ unsigned i;
+ int r;
+
+ r = amdgpu_ring_alloc(ring, 16);
+ if (r) {
+ DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (amdgpu_ring_get_rptr(ring) != rptr)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed\n",
+ ring->idx);
+ r = -ETIMEDOUT;
+ }
+
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
+ *
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @fence: optional fence to return
+ *
+ * Open up a stream for HW test
+ */
+static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
+{
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint64_t dummy;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ dummy = ib->gpu_addr + 1024;
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+ ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+ ib->ptr[ib->length_dw++] = dummy;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = dma_fence_get(f);
+ if (r)
+ goto err;
+
+ amdgpu_job_free(job);
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
+ *
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @direct: submit directly on the ring instead of through the scheduler
+ * @fence: optional fence to return
+ *
+ * Close up a stream for HW test or if userspace failed to do so
+ */
+static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+ uint32_t handle,
+ bool direct, struct dma_fence **fence)
+{
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint64_t dummy;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ dummy = ib->gpu_addr + 1024;
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+ ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+ ib->ptr[ib->length_dw++] = dummy;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ if (direct) {
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = dma_fence_get(f);
+ if (r)
+ goto err;
+
+ amdgpu_job_free(job);
+ } else {
+ r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err;
+ }
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
+ *
+ * @ring: the engine to test on
+ * @timeout: how long to wait for the test fence, in jiffies
+ *
+ */
+static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct dma_fence *fence = NULL;
+ long r;
+
+ r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ goto error;
+ }
+
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ goto error;
+ }
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ } else {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ }
+error:
+ dma_fence_put(fence);
+ return r;
+}
static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
uvd_v6_0_set_ring_funcs(adev);
+
+ if (uvd_v6_0_enc_support(adev)) {
+ adev->uvd.num_enc_rings = 2;
+ uvd_v6_0_set_enc_ring_funcs(adev);
+ }
+
uvd_v6_0_set_irq_funcs(adev);
return 0;
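The create and destroy test messages above share a fixed 16-dword layout: a 0x18-byte session-info block carrying the handle and a scratch address (ib->gpu_addr + 1024 in the driver), a 0x14-byte task-info block, and an 8-byte op word, 0x08000001 to open the session and 0x08000002 to close it, with the rest zero-padded. A small host-side sketch that builds the same dword stream under those assumptions:

#include <stddef.h>
#include <stdint.h>

#define SESSION_INFO  0x00000001
#define TASK_INFO     0x00000002
#define OP_INITIALIZE 0x08000001
#define OP_CLOSE      0x08000002

/* Fill a 16-dword UVD ENC test message; returns the number of dwords used. */
static size_t build_enc_msg(uint32_t *ib, uint32_t handle,
                            uint64_t dummy_addr, int create)
{
    size_t n = 0;

    ib[n++] = 0x00000018;                 /* session info, 0x18 bytes */
    ib[n++] = SESSION_INFO;
    ib[n++] = handle;
    ib[n++] = 0x00010000;
    ib[n++] = (uint32_t)(dummy_addr >> 32);
    ib[n++] = (uint32_t)dummy_addr;

    ib[n++] = 0x00000014;                 /* task info, 0x14 bytes */
    ib[n++] = TASK_INFO;
    ib[n++] = 0x0000001c;
    ib[n++] = 0x00000001;
    ib[n++] = 0x00000000;

    ib[n++] = 0x00000008;                 /* 8-byte op */
    ib[n++] = create ? OP_INITIALIZE : OP_CLOSE;

    while (n < 16)
        ib[n++] = 0x0;                    /* pad to ib_size_dw */
    return n;
}

int main(void)
{
    uint32_t ib[16];
    return (int)build_enc_msg(ib, 1, 0x1000, 1) - 16;   /* 0 on success */
}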
@@ -102,7 +386,7 @@ static int uvd_v6_0_early_init(void *handle)
static int uvd_v6_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- int r;
+ int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
@@ -110,10 +394,31 @@ static int uvd_v6_0_sw_init(void *handle)
if (r)
return r;
+ /* UVD ENC TRAP */
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
+ if (r)
+ return r;
+ }
+ }
+
r = amdgpu_uvd_sw_init(adev);
if (r)
return r;
+ if (uvd_v6_0_enc_support(adev)) {
+ struct amd_sched_rq *rq;
+ ring = &adev->uvd.ring_enc[0];
+ rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+ r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq, amdgpu_sched_jobs);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+ return r;
+ }
+ }
+
r = amdgpu_uvd_resume(adev);
if (r)
return r;
@@ -121,19 +426,38 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+ if (r)
+ return r;
+
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.ring_enc[i];
+ sprintf(ring->name, "uvd_enc%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+ if (r)
+ return r;
+ }
+ }
return r;
}
static int uvd_v6_0_sw_fini(void *handle)
{
- int r;
+ int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
+ if (uvd_v6_0_enc_support(adev)) {
+ amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+ }
+
return amdgpu_uvd_sw_fini(adev);
}
@@ -149,7 +473,7 @@ static int uvd_v6_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t tmp;
- int r;
+ int i, r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
@@ -189,9 +513,25 @@ static int uvd_v6_0_hw_init(void *handle)
amdgpu_ring_commit(ring);
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.ring_enc[i];
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+ }
+ }
+
done:
- if (!r)
- DRM_INFO("UVD initialized successfully.\n");
+ if (!r) {
+ if (uvd_v6_0_enc_support(adev))
+ DRM_INFO("UVD and UVD ENC initialized successfully.\n");
+ else
+ DRM_INFO("UVD initialized successfully.\n");
+ }
return r;
}
@@ -225,11 +565,7 @@ static int uvd_v6_0_suspend(void *handle)
if (r)
return r;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU))
- r = amdgpu_uvd_suspend(adev);
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v6_0_resume(void *handle)
@@ -237,12 +573,10 @@ static int uvd_v6_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
- }
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
return uvd_v6_0_hw_init(adev);
}
@@ -514,6 +848,22 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
+ if (uvd_v6_0_enc_support(adev)) {
+ ring = &adev->uvd.ring_enc[0];
+ WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->uvd.ring_enc[1];
+ WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
+ }
+
return 0;
}
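As the start sequence above shows, the two encode rings are programmed through parallel register sets: ring 0 uses UVD_RB_RPTR/WPTR/BASE_LO/BASE_HI/SIZE and ring 1 uses the *2 variants, and the enc get/set wptr helpers earlier in the file select between the same pairs. A sketch of one way to capture that pairing in a table instead of open-coded if/else; the register offsets are placeholders, not the real mmUVD_* values.

#include <stdint.h>

/* Placeholder offsets standing in for the mmUVD_RB_* register defines. */
struct uvd_enc_ring_regs {
    uint32_t rptr, wptr, base_lo, base_hi, size;
};

static const struct uvd_enc_ring_regs enc_regs[2] = {
    { 0xA00, 0xA01, 0xA02, 0xA03, 0xA04 },   /* ring_enc[0]: RPTR/WPTR/...  */
    { 0xA10, 0xA11, 0xA12, 0xA13, 0xA14 },   /* ring_enc[1]: RPTR2/WPTR2/... */
};

/* Illustrative register write; the driver would use WREG32(). */
static void wreg32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

static void enc_ring_start(int idx, uint64_t gpu_addr,
                           uint32_t wptr, uint32_t ring_size_bytes)
{
    const struct uvd_enc_ring_regs *r = &enc_regs[idx];

    wreg32(r->rptr, wptr);
    wreg32(r->wptr, wptr);
    wreg32(r->base_lo, (uint32_t)gpu_addr);
    wreg32(r->base_hi, (uint32_t)(gpu_addr >> 32));
    wreg32(r->size, ring_size_bytes / 4);
}

int main(void)
{
    enc_ring_start(0, 0x100000, 0, 4096);
    return 0;
}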
@@ -577,6 +927,26 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address to write the fence sequence number to
+ * @seq: sequence number to write
+ * @flags: fence flags (64-bit sequence numbers are not supported here)
+ *
+ * Write an enc fence and a trap command to the ring.
+ */
+static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags)
+{
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
+ amdgpu_ring_write(ring, addr);
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
+}
+
+/**
* uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
*
* @ring: amdgpu_ring pointer
@@ -667,6 +1037,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
+/**
+ * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ * @vm_id: VM id of the job owning the IB
+ * @ctx_switch: unused by the enc ring
+ *
+ * Write enc ring commands to execute the indirect buffer
+ */
+static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+}
+
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
@@ -718,6 +1106,33 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
+static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+}
+
+static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+}
+
+static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vm_id, uint64_t pd_addr)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
+ amdgpu_ring_write(ring, vm_id);
+}
+
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
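The enc emit helpers above are thin packet builders around the HEVC_ENC_CMD_* opcodes added to vid.h at the end of this patch. For example, the fence-plus-trap sequence is exactly five dwords, which matches the "5 + 5" budgeted per fence in emit_frame_size below. A standalone sketch building that packet into a plain buffer:

#include <stddef.h>
#include <stdint.h>

/* HEVC ENC command opcodes as defined in vid.h by this change. */
#define HEVC_ENC_CMD_FENCE 0x00000003
#define HEVC_ENC_CMD_TRAP  0x00000004

/* Build the 5-dword fence+trap sequence emitted by
 * uvd_v6_0_enc_ring_emit_fence() into a plain buffer. */
static size_t emit_enc_fence(uint32_t *out, uint64_t addr, uint32_t seq)
{
    size_t n = 0;

    out[n++] = HEVC_ENC_CMD_FENCE;
    out[n++] = (uint32_t)addr;           /* lower 32 bits of the address */
    out[n++] = (uint32_t)(addr >> 32);   /* upper 32 bits of the address */
    out[n++] = seq;
    out[n++] = HEVC_ENC_CMD_TRAP;
    return n;
}

int main(void)
{
    uint32_t pkt[5];
    return emit_enc_fence(pkt, 0x100000, 42) == 5 ? 0 : 1;
}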
@@ -825,8 +1240,31 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ bool int_handled = true;
DRM_DEBUG("IH: UVD TRAP\n");
- amdgpu_fence_process(&adev->uvd.ring);
+
+ switch (entry->src_id) {
+ case 124:
+ amdgpu_fence_process(&adev->uvd.ring);
+ break;
+ case 119:
+ if (likely(uvd_v6_0_enc_support(adev)))
+ amdgpu_fence_process(&adev->uvd.ring_enc[0]);
+ else
+ int_handled = false;
+ break;
+ case 120:
+ if (likely(uvd_v6_0_enc_support(adev)))
+ amdgpu_fence_process(&adev->uvd.ring_enc[1]);
+ else
+ int_handled = false;
+ break;
+ }
+
+ if (!int_handled)
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+
return 0;
}
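The interrupt handler above fans out on the legacy source ID: 124 is the decode ring trap, while 119 and 120 are the two encode ring traps, matching the "i + 119" IRQ sources registered in sw_init. A compact standalone sketch of the same dispatch, with the fence processing replaced by a stub:

#include <stdbool.h>
#include <stdio.h>

enum { UVD_DEC_RING = 0, UVD_ENC_RING0 = 1, UVD_ENC_RING1 = 2 };

static void fence_process(int ring) { printf("fence_process(ring %d)\n", ring); }

/* Map a UVD trap source ID to the ring whose fences should be processed. */
static bool uvd_dispatch(unsigned src_id, bool enc_support)
{
    switch (src_id) {
    case 124:
        fence_process(UVD_DEC_RING);
        return true;
    case 119:
        if (enc_support) { fence_process(UVD_ENC_RING0); return true; }
        break;
    case 120:
        if (enc_support) { fence_process(UVD_ENC_RING1); return true; }
        break;
    }
    return false;    /* unhandled: the driver logs an error here */
}

int main(void)
{
    return uvd_dispatch(119, true) ? 0 : 1;
}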
@@ -1153,6 +1591,33 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.end_use = amdgpu_uvd_ring_end_use,
};
+static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD_ENC,
+ .align_mask = 0x3f,
+ .nop = HEVC_ENC_CMD_NO_OP,
+ .support_64bit_ptrs = false,
+ .get_rptr = uvd_v6_0_enc_ring_get_rptr,
+ .get_wptr = uvd_v6_0_enc_ring_get_wptr,
+ .set_wptr = uvd_v6_0_enc_ring_set_wptr,
+ .emit_frame_size =
+ 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
+ 6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* uvd_v6_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
+ .emit_ib = uvd_v6_0_enc_ring_emit_ib,
+ .emit_fence = uvd_v6_0_enc_ring_emit_fence,
+ .emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
+ .emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
+ .test_ring = uvd_v6_0_enc_ring_test_ring,
+ .test_ib = uvd_v6_0_enc_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = uvd_v6_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+};
+
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
if (adev->asic_type >= CHIP_POLARIS10) {
@@ -1164,6 +1629,16 @@ static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
}
}
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
+
+ DRM_INFO("UVD ENC is enabled in VM mode\n");
+}
+
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
.set = uvd_v6_0_set_interrupt_state,
.process = uvd_v6_0_process_interrupt,
@@ -1171,7 +1646,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->uvd.irq.num_types = 1;
+ if (uvd_v6_0_enc_support(adev))
+ adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
+ else
+ adev->uvd.irq.num_types = 1;
+
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index b8ed8faf2003..6634545060fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -592,11 +592,7 @@ static int uvd_v7_0_suspend(void *handle)
if (r)
return r;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU))
- r = amdgpu_uvd_suspend(adev);
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v7_0_resume(void *handle)
@@ -604,12 +600,10 @@ static int uvd_v7_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
- }
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
return uvd_v7_0_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a3b30d84dbb3..697325737ba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -260,15 +260,18 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
return true;
}
- /* Not a retry fault */
- if (!(dw5 & 0x80))
- return true;
-
pasid = dw3 & 0xffff;
/* No PASID, can't identify faulting process */
if (!pasid)
return true;
+ /* Not a retry fault, check fault credit */
+ if (!(dw5 & 0x80)) {
+ if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
+ goto ignore_iv;
+ return true;
+ }
+
addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
key = AMDGPU_VM_FAULT(pasid, addr);
r = amdgpu_ih_add_fault(adev, key);
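The reordering above extracts the PASID before the retry check, so a non-retry VM fault is now kept only while the faulting PASID still has fault credit, while retry faults continue into the existing deduplication path. A minimal sketch of that decision order, with the driver helper replaced by an illustrative stub and the dedup step left out:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for amdgpu_vm_pasid_fault_credit(); the name is illustrative. */
static bool pasid_fault_credit(uint16_t pasid) { (void)pasid; return true; }

/* Mirror of the order above: extract the PASID first, then treat non-retry
 * faults as processable only while the PASID has fault credit.  Retry
 * faults go on to the deduplication path, which is not sketched here. */
static bool prescreen_vm_fault(uint32_t dw3, uint32_t dw5, bool *is_retry)
{
    uint16_t pasid = dw3 & 0xffff;

    if (!pasid)
        return true;                 /* cannot identify the faulting process */

    *is_retry = (dw5 & 0x80) != 0;
    if (!*is_retry)
        return pasid_fault_credit(pasid);

    return true;                     /* retry fault: handled further down */
}

int main(void)
{
    bool retry;
    return prescreen_vm_fault(0x0005, 0x00, &retry) ? 0 : 1;
}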
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9ff69b90df36..f3cfef48aa99 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1254,7 +1254,6 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
@@ -1271,7 +1270,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_MC,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
@@ -1289,7 +1289,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_SDMA,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
@@ -1307,7 +1308,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_HDP,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
@@ -1321,7 +1323,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_LS,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
if (state == AMD_CG_STATE_UNGATE)
@@ -1333,7 +1336,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
@@ -1347,7 +1351,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_DRM,
PP_STATE_SUPPORT_LS,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
@@ -1361,7 +1366,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_ROM,
PP_STATE_SUPPORT_CG,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
}
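Every call site above now checks that the powerplay backend actually implements set_clockgating_by_smu before dispatching the message, instead of calling through a raw pp_handle. The repeated guard could just as well live in one small helper; a sketch of that shape, with the structures reduced to only what the snippet needs (this is an assumption about how one might factor it, not the driver's code):

#include <stdint.h>

struct pp_funcs {
    /* Only the hook this example cares about. */
    int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
};

struct device_ctx {
    struct pp_funcs *pp_funcs;
    void *pp_handle;
};

/* Dispatch the SMU clockgating message only if the backend provides it. */
static int set_clockgating_by_smu(struct device_ctx *dev, uint32_t msg_id)
{
    if (!dev->pp_funcs || !dev->pp_funcs->set_clockgating_by_smu)
        return 0;    /* quietly skip, as the guarded call sites do */
    return dev->pp_funcs->set_clockgating_by_smu(dev->pp_handle, msg_id);
}

int main(void)
{
    struct device_ctx dev = { 0 };
    return set_clockgating_by_smu(&dev, 0x5);   /* skipped: no backend hook */
}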
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index a6485254a169..dbf3703cbd1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -465,6 +465,16 @@
#define VCE_CMD_UPDATE_PTB 0x00000107
#define VCE_CMD_FLUSH_TLB 0x00000108
+/* HEVC ENC */
+#define HEVC_ENC_CMD_NO_OP 0x00000000
+#define HEVC_ENC_CMD_END 0x00000001
+#define HEVC_ENC_CMD_FENCE 0x00000003
+#define HEVC_ENC_CMD_TRAP 0x00000004
+#define HEVC_ENC_CMD_IB_VM 0x00000102
+#define HEVC_ENC_CMD_WAIT_GE 0x00000106
+#define HEVC_ENC_CMD_UPDATE_PTB 0x00000107
+#define HEVC_ENC_CMD_FLUSH_TLB 0x00000108
+
/* mmPA_SC_RASTER_CONFIG mask */
#define RB_MAP_PKR0(x) ((x) << 0)
#define RB_MAP_PKR0_MASK (0x3 << 0)