Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 309
1 file changed, 210 insertions(+), 99 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c06479615e8a..d1d2c27156b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -271,58 +271,65 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_ib ib;
- struct dma_fence *f = NULL;
- uint32_t scratch;
- uint32_t tmp = 0;
- long r;
-
- r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
- return r;
- }
- WREG32(scratch, 0xCAFEDEAD);
- memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
- ib.ptr[2] = 0xDEADBEEF;
- ib.length_dw = 3;
-
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
- if (r)
- goto err2;
-
- r = dma_fence_wait_timeout(f, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
- r = -ETIMEDOUT;
- goto err2;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- goto err2;
- }
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
- r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
- r = -EINVAL;
- }
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct dma_fence *f = NULL;
+
+ unsigned index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
+ long r;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
+ }
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (r)
+ goto err2;
+
+ r = dma_fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ goto err2;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto err2;
+ }
+
+ tmp = adev->wb.wb[index];
+ if (tmp == 0xDEADBEEF) {
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ } else {
+ DRM_ERROR("ib test on ring %d failed\n", ring->idx);
+ r = -EINVAL;
+ }
+
err2:
- amdgpu_ib_free(adev, &ib, NULL);
- dma_fence_put(f);
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
err1:
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
+ amdgpu_device_wb_free(adev, index);
+ return r;
}
@@ -1254,23 +1261,23 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -1539,7 +1546,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
- tmp = adev->mc.shared_aperture_start >> 48;
+ tmp = adev->gmc.shared_aperture_start >> 48;
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
}
}
@@ -2954,7 +2961,13 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
if (amdgpu_sriov_vf(adev)) {
- pr_debug("For SRIOV client, shouldn't do anything.\n");
+ gfx_v9_0_cp_gfx_enable(adev, false);
+ /* must disable polling for SRIOV when hw finished, otherwise
+ * CPC engine may still keep fetching WB address which is already
+ * invalid after sw finished and trigger DMAR reading error in
+ * hypervisor side.
+ */
+ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
return 0;
}
gfx_v9_0_cp_enable(adev, false);
@@ -3585,14 +3598,6 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ref_and_mask, ref_and_mask, 0x20);
}
-static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
-
- gfx_v9_0_write_data_to_reg(ring, 0, true,
- SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
-}
-
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vmid, bool ctx_switch)
@@ -3686,32 +3691,10 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
- uint64_t flags = AMDGPU_PTE_VALID;
- unsigned eng = ring->vm_inv_eng;
-
- amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
- pd_addr |= flags;
-
- gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->ctx0_ptb_addr_lo32 + (2 * vmid),
- lower_32_bits(pd_addr));
-
- gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->ctx0_ptb_addr_hi32 + (2 * vmid),
- upper_32_bits(pd_addr));
-
- gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->vm_inv_eng0_req + eng, req);
-
- /* wait for the invalidate to complete */
- gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
- eng, 0, 1 << vmid, 1 << vmid, 0x20);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
- if (usepfp) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
@@ -3735,6 +3718,105 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
return wptr;
}
+static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
+ bool acquire)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int pipe_num, tmp, reg;
+ int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
+
+ pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
+
+ /* first me only has 2 entries, GFX and HP3D */
+ if (ring->me > 0)
+ pipe_num -= 2;
+
+ reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
+ tmp = RREG32(reg);
+ tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
+ WREG32(reg, tmp);
+}
+
+static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ int i, pipe;
+ bool reserve;
+ struct amdgpu_ring *iring;
+
+ mutex_lock(&adev->gfx.pipe_reserve_mutex);
+ pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
+ if (acquire)
+ set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ else
+ clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+
+ if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
+ /* Clear all reservations - everyone reacquires all resources */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
+ gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
+ true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i)
+ gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
+ true);
+ } else {
+ /* Lower all pipes without a current reservation */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ iring = &adev->gfx.gfx_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v9_0_ring_set_pipe_percent(iring, reserve);
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ iring = &adev->gfx.compute_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v9_0_ring_set_pipe_percent(iring, reserve);
+ }
+ }
+
+ mutex_unlock(&adev->gfx.pipe_reserve_mutex);
+}
+
+static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ uint32_t pipe_priority = acquire ? 0x2 : 0x0;
+ uint32_t queue_priority = acquire ? 0xf : 0x0;
+
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
+ WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
+
+ soc15_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
+ enum drm_sched_priority priority)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return;
+
+ gfx_v9_0_hqd_set_priority(adev, ring, acquire);
+ gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
+}
+
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3788,7 +3870,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
int cnt;
cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
- csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
+ csa_addr = amdgpu_csa_vaddr(ring->adev);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
@@ -3806,7 +3888,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
uint64_t csa_addr, gds_addr;
int cnt;
- csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
+ csa_addr = amdgpu_csa_vaddr(ring->adev);
gds_addr = csa_addr + 4096;
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -3904,15 +3986,34 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val)
+ uint32_t val)
{
+ uint32_t cmd = 0;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
+ break;
+ case AMDGPU_RING_TYPE_KIQ:
+ cmd = (1 << 16); /* no inc addr */
+ break;
+ default:
+ cmd = WR_CONFIRM;
+ break;
+ }
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
+ amdgpu_ring_write(ring, cmd);
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val);
}
+static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -4199,7 +4300,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_frame_size = /* totally 242 maximum if 16 IBs */
5 + /* COND_EXEC */
7 + /* PIPELINE_SYNC */
- 24 + /* VM_FLUSH */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -4221,7 +4324,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -4231,6 +4333,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
.emit_tmz = gfx_v9_0_ring_emit_tmz,
+ .emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -4245,9 +4349,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_frame_size =
20 + /* gfx_v9_0_ring_emit_gds_switch */
7 + /* gfx_v9_0_ring_emit_hdp_flush */
- 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
+ 5 + /* hdp invalidate */
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
- 24 + /* gfx_v9_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -4256,11 +4362,13 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .set_priority = gfx_v9_0_ring_set_priority_compute,
+ .emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -4275,9 +4383,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_frame_size =
20 + /* gfx_v9_0_ring_emit_gds_switch */
7 + /* gfx_v9_0_ring_emit_hdp_flush */
- 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
+ 5 + /* hdp invalidate */
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
- 24 + /* gfx_v9_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -4288,6 +4398,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v9_0_ring_emit_rreg,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)