Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c	24
1 file changed, 14 insertions, 10 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index cb5a6f1437f8..39e17aae655f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -153,9 +153,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
 	struct amdgpu_firmware_info *info = NULL;
 	const struct common_firmware_header *header = NULL;
 
-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
 	DRM_DEBUG("\n");
 
 	switch (adev->asic_type) {
@@ -197,7 +194,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
 		if (err)
 			goto out;
 
-		err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
+		err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
 		if (err)
 			goto out;
 	}
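
(Illustrative note, not part of the patch.) Both hunks above touch sdma_v5_2_init_microcode(): the first removes the SR-IOV early return, so the function now runs for virtual functions as well, and the second replaces the hard-coded instance[0] with the loop index so every SDMA instance gets its firmware context initialized, not just the first. A minimal sketch of the corrected loop, with the surrounding firmware-request details assumed rather than quoted from the file:

	/* Sketch only; the request_firmware() handling per instance is elided. */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		/* ... fetch firmware for instance i into adev->sdma.instance[i].fw ... */
		err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
		if (err)
			goto out;
	}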
@@ -356,7 +353,9 @@ static void sdma_v5_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * sdma_v5_2_ring_emit_ib - Schedule an IB on the DMA engine
  *
  * @ring: amdgpu ring pointer
+ * @job: job to retrieve vmid from
  * @ib: IB object to schedule
+ * @flags: unused
  *
  * Schedule an IB in the DMA ring.
  */
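
(Illustrative note, not part of the patch.) The remaining hunks are kernel-doc fixes: each parameter in a function's signature gets exactly one matching @ line, and stale or renamed entries are corrected. As a sketch, the block above pairs with a prototype along these lines; the exact signature is recalled from the driver and should be treated as an assumption:

	void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,	/* @ring  */
				    struct amdgpu_job *job,	/* @job   */
				    struct amdgpu_ib *ib,	/* @ib    */
				    uint32_t flags);		/* @flags (unused) */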
@@ -418,7 +417,9 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  * sdma_v5_2_ring_emit_fence - emit a fence on the DMA ring
  *
  * @ring: amdgpu ring pointer
- * @fence: amdgpu fence object
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
  *
  * Add a DMA fence packet to the ring to write
  * the fence seq number and DMA trap packet to generate
@@ -916,6 +917,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
  * sdma_v5_2_ring_test_ib - test an IB on the DMA engine
  *
  * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
  *
  * Test a simple IB in the DMA ring.
  * Returns 0 on success, error on failure.
@@ -1017,10 +1019,9 @@ static void sdma_v5_2_vm_copy_pte(struct amdgpu_ib *ib,
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
  * Update PTEs by writing them manually using sDMA.
  */
@@ -1076,6 +1077,7 @@ static void sdma_v5_2_vm_set_pte_pde(struct amdgpu_ib *ib,
  * sdma_v5_2_ring_pad_ib - pad the IB
  *
  * @ib: indirect buffer to fill with padding
+ * @ring: amdgpu_ring structure holding ring information
  *
  * Pad the IB with NOPs to a boundary multiple of 8.
  */
@@ -1127,7 +1129,8 @@ static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
  * sdma_v5_2_ring_emit_vm_flush - vm flush using sDMA
  *
  * @ring: amdgpu_ring pointer
- * @vm: amdgpu_vm pointer
+ * @vmid: vmid number to use
+ * @pd_addr: address
  *
  * Update the page table base and flush the VM TLB
  * using sDMA.
@@ -1700,10 +1703,11 @@ static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev)
 /**
  * sdma_v5_2_emit_copy_buffer - copy buffer using the sDMA engine
  *
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to copy to
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @byte_count: number of bytes to xfer
+ * @tmz: if a secure copy should be used
  *
  * Copy GPU buffers using the DMA engine.
  * Used by the amdgpu ttm implementation to move pages if
@@ -1729,7 +1733,7 @@ static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib,
 /**
  * sdma_v5_2_emit_fill_buffer - fill buffer using the sDMA engine
  *
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill
  * @src_data: value to write to buffer
  * @dst_offset: dst GPU address
  * @byte_count: number of bytes to xfer
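
(Illustrative note, not part of the patch.) The last two hunks fix the buffer-copy and buffer-fill helpers: the stale @ring line goes away because, per the updated @ib lines, these callbacks emit packets into an indirect buffer rather than onto a ring, and the copy variant documents the @tmz flag selecting a secure (TMZ) copy. A sketch of the copy callback's parameter list, again with the signature assumed from memory:

	void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib,	/* @ib         */
					uint64_t src_offset,	/* @src_offset */
					uint64_t dst_offset,	/* @dst_offset */
					uint32_t byte_count,	/* @byte_count */
					bool tmz);		/* @tmz        */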