Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd')
 drivers/gpu/drm/amd/amdkfd/Makefile                        |   2
 drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c           |  66
 drivers/gpu/drm/amd/amdkfd/cik_int.h                       |  41
 drivers/gpu/drm/amd/amdkfd/cik_regs.h                      | 177
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c                   | 395
 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c                    | 886
 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h                    | 193
 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c                    | 168
 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h                    | 294
 drivers/gpu/drm/amd/amdkfd/kfd_device.c                    |  60
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c      |  82
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h      |  27
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c  |  18
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c   |  10
 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c                  |  15
 drivers/gpu/drm/amd/amdkfd/kfd_events.c                    | 969
 drivers/gpu/drm/amd/amdkfd/kfd_events.h                    |  84
 drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c                 | 188
 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c              |   5
 drivers/gpu/drm/amd/amdkfd/kfd_module.c                    |   9
 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c            |  46
 drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h               |   6
 drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_diq.h           | 290
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h                      |  99
 drivers/gpu/drm/amd/amdkfd/kfd_process.c                   |  49
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c     |  18
 26 files changed, 3969 insertions(+), 228 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 0f4960148126..28551153ec6d 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -12,5 +12,7 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_kernel_queue_vi.o kfd_packet_manager.o \
kfd_process_queue_manager.o kfd_device_queue_manager.o \
kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \
+ kfd_interrupt.o kfd_events.o cik_event_interrupt.o \
+ kfd_dbgdev.o kfd_dbgmgr.o
obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
new file mode 100644
index 000000000000..211fc48697fa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kfd_priv.h"
+#include "kfd_events.h"
+#include "cik_int.h"
+
+static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry)
+{
+ unsigned int pasid;
+ const struct cik_ih_ring_entry *ihre =
+ (const struct cik_ih_ring_entry *)ih_ring_entry;
+
+ pasid = (ihre->ring_id & 0xffff0000) >> 16;
+
+ /* Do not process in ISR, just request it to be forwarded to WQ. */
+ return (pasid != 0) &&
+ (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
+ ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
+ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE);
+}
+
+static void cik_event_interrupt_wq(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry)
+{
+ unsigned int pasid;
+ const struct cik_ih_ring_entry *ihre =
+ (const struct cik_ih_ring_entry *)ih_ring_entry;
+
+ pasid = (ihre->ring_id & 0xffff0000) >> 16;
+
+ if (pasid == 0)
+ return;
+
+ if (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE)
+ kfd_signal_event_interrupt(pasid, 0, 0);
+ else if (ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG)
+ kfd_signal_event_interrupt(pasid, ihre->data & 0xFF, 8);
+ else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
+ kfd_signal_hw_exception_event(pasid);
+}
+
+const struct kfd_event_interrupt_class event_interrupt_class_cik = {
+ .interrupt_isr = cik_event_interrupt_isr,
+ .interrupt_wq = cik_event_interrupt_wq,
+};
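
Note: the two callbacks above form a two-stage interrupt path. interrupt_isr runs in interrupt context and only decides whether an IH ring entry is interesting, while interrupt_wq does the actual event signalling later from a work queue. A minimal sketch of how a dispatcher might drive them; the real wiring is added in kfd_interrupt.c/kfd_device.c elsewhere in this series, and the helper name below is illustrative only:

	/* Hypothetical dispatcher sketch -- not part of this patch. */
	static void example_dispatch(struct kfd_dev *dev,
				     const uint32_t *ih_ring_entry)
	{
		const struct kfd_event_interrupt_class *cls =
						&event_interrupt_class_cik;

		/* Stage 1: cheap filter in interrupt context. */
		if (!cls->interrupt_isr(dev, ih_ring_entry))
			return;	/* not an entry amdkfd cares about */

		/*
		 * Stage 2: copy the entry and defer processing to a worker,
		 * which eventually calls cls->interrupt_wq(dev, entry_copy).
		 */
		example_enqueue_and_schedule(dev, ih_ring_entry);
	}
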
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
new file mode 100644
index 000000000000..79a16d24c1b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef HSA_RADEON_CIK_INT_H_INCLUDED
+#define HSA_RADEON_CIK_INT_H_INCLUDED
+
+#include <linux/types.h>
+
+struct cik_ih_ring_entry {
+ uint32_t source_id;
+ uint32_t data;
+ uint32_t ring_id;
+ uint32_t reserved;
+};
+
+#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
+#define CIK_INTSRC_CP_END_OF_PIPE 0xB5
+#define CIK_INTSRC_CP_BAD_OPCODE 0xB7
+#define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 01ff332fabd4..183be5b8414f 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -23,33 +23,11 @@
#ifndef CIK_REGS_H
#define CIK_REGS_H
-#define IH_VMID_0_LUT 0x3D40u
-
-#define BIF_DOORBELL_CNTL 0x530Cu
-
-#define SRBM_GFX_CNTL 0xE44
-#define PIPEID(x) ((x) << 0)
-#define MEID(x) ((x) << 2)
-#define VMID(x) ((x) << 4)
-#define QUEUEID(x) ((x) << 8)
-
-#define SQ_CONFIG 0x8C00
-
-#define SH_MEM_BASES 0x8C28
/* if PTR32, these are the bases for scratch and lds */
#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
#define SHARED_BASE(x) ((x) << 16) /* LDS */
-#define SH_MEM_APE1_BASE 0x8C2C
-/* if PTR32, this is the base location of GPUVM */
-#define SH_MEM_APE1_LIMIT 0x8C30
-/* if PTR32, this is the upper limit of GPUVM */
-#define SH_MEM_CONFIG 0x8C34
#define PTR32 (1 << 0)
-#define PRIVATE_ATC (1 << 1)
#define ALIGNMENT_MODE(x) ((x) << 2)
-#define SH_MEM_ALIGNMENT_MODE_DWORD 0
-#define SH_MEM_ALIGNMENT_MODE_DWORD_STRICT 1
-#define SH_MEM_ALIGNMENT_MODE_STRICT 2
#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
#define DEFAULT_MTYPE(x) ((x) << 4)
#define APE1_MTYPE(x) ((x) << 7)
@@ -58,137 +36,34 @@
#define MTYPE_CACHED 0
#define MTYPE_NONCACHED 3
-
-#define SH_STATIC_MEM_CONFIG 0x9604u
-
-#define TC_CFG_L1_LOAD_POLICY0 0xAC68
-#define TC_CFG_L1_LOAD_POLICY1 0xAC6C
-#define TC_CFG_L1_STORE_POLICY 0xAC70
-#define TC_CFG_L2_LOAD_POLICY0 0xAC74
-#define TC_CFG_L2_LOAD_POLICY1 0xAC78
-#define TC_CFG_L2_STORE_POLICY0 0xAC7C
-#define TC_CFG_L2_STORE_POLICY1 0xAC80
-#define TC_CFG_L2_ATOMIC_POLICY 0xAC84
-#define TC_CFG_L1_VOLATILE 0xAC88
-#define TC_CFG_L2_VOLATILE 0xAC8C
-
-#define CP_PQ_WPTR_POLL_CNTL 0xC20C
-#define WPTR_POLL_EN (1 << 31)
-
-#define CPC_INT_CNTL 0xC2D0
-#define CP_ME1_PIPE0_INT_CNTL 0xC214
-#define CP_ME1_PIPE1_INT_CNTL 0xC218
-#define CP_ME1_PIPE2_INT_CNTL 0xC21C
-#define CP_ME1_PIPE3_INT_CNTL 0xC220
-#define CP_ME2_PIPE0_INT_CNTL 0xC224
-#define CP_ME2_PIPE1_INT_CNTL 0xC228
-#define CP_ME2_PIPE2_INT_CNTL 0xC22C
-#define CP_ME2_PIPE3_INT_CNTL 0xC230
-#define DEQUEUE_REQUEST_INT_ENABLE (1 << 13)
-#define WRM_POLL_TIMEOUT_INT_ENABLE (1 << 17)
-#define PRIV_REG_INT_ENABLE (1 << 23)
-#define TIME_STAMP_INT_ENABLE (1 << 26)
-#define GENERIC2_INT_ENABLE (1 << 29)
-#define GENERIC1_INT_ENABLE (1 << 30)
-#define GENERIC0_INT_ENABLE (1 << 31)
-#define CP_ME1_PIPE0_INT_STATUS 0xC214
-#define CP_ME1_PIPE1_INT_STATUS 0xC218
-#define CP_ME1_PIPE2_INT_STATUS 0xC21C
-#define CP_ME1_PIPE3_INT_STATUS 0xC220
-#define CP_ME2_PIPE0_INT_STATUS 0xC224
-#define CP_ME2_PIPE1_INT_STATUS 0xC228
-#define CP_ME2_PIPE2_INT_STATUS 0xC22C
-#define CP_ME2_PIPE3_INT_STATUS 0xC230
-#define DEQUEUE_REQUEST_INT_STATUS (1 << 13)
-#define WRM_POLL_TIMEOUT_INT_STATUS (1 << 17)
-#define PRIV_REG_INT_STATUS (1 << 23)
-#define TIME_STAMP_INT_STATUS (1 << 26)
-#define GENERIC2_INT_STATUS (1 << 29)
-#define GENERIC1_INT_STATUS (1 << 30)
-#define GENERIC0_INT_STATUS (1 << 31)
-
-#define CP_HPD_EOP_BASE_ADDR 0xC904
-#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
-#define CP_HPD_EOP_VMID 0xC90C
-#define CP_HPD_EOP_CONTROL 0xC910
-#define EOP_SIZE(x) ((x) << 0)
-#define EOP_SIZE_MASK (0x3f << 0)
-#define CP_MQD_BASE_ADDR 0xC914
-#define CP_MQD_BASE_ADDR_HI 0xC918
-#define CP_HQD_ACTIVE 0xC91C
-#define CP_HQD_VMID 0xC920
-
-#define CP_HQD_PERSISTENT_STATE 0xC924u
#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8)
#define PRELOAD_REQ (1 << 0)
-#define CP_HQD_PIPE_PRIORITY 0xC928u
-#define CP_HQD_QUEUE_PRIORITY 0xC92Cu
-#define CP_HQD_QUANTUM 0xC930u
+#define MQD_CONTROL_PRIV_STATE_EN (1U << 8)
+
+#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
+
+#define IB_ATC_EN (1U << 23)
+
#define QUANTUM_EN 1U
#define QUANTUM_SCALE_1MS (1U << 4)
#define QUANTUM_DURATION(x) ((x) << 8)
-#define CP_HQD_PQ_BASE 0xC934
-#define CP_HQD_PQ_BASE_HI 0xC938
-#define CP_HQD_PQ_RPTR 0xC93C
-#define CP_HQD_PQ_RPTR_REPORT_ADDR 0xC940
-#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI 0xC944
-#define CP_HQD_PQ_WPTR_POLL_ADDR 0xC948
-#define CP_HQD_PQ_WPTR_POLL_ADDR_HI 0xC94C
-#define CP_HQD_PQ_DOORBELL_CONTROL 0xC950
-#define DOORBELL_OFFSET(x) ((x) << 2)
-#define DOORBELL_OFFSET_MASK (0x1fffff << 2)
-#define DOORBELL_SOURCE (1 << 28)
-#define DOORBELL_SCHD_HIT (1 << 29)
-#define DOORBELL_EN (1 << 30)
-#define DOORBELL_HIT (1 << 31)
-#define CP_HQD_PQ_WPTR 0xC954
-#define CP_HQD_PQ_CONTROL 0xC958
-#define QUEUE_SIZE(x) ((x) << 0)
-#define QUEUE_SIZE_MASK (0x3f << 0)
#define RPTR_BLOCK_SIZE(x) ((x) << 8)
-#define RPTR_BLOCK_SIZE_MASK (0x3f << 8)
#define MIN_AVAIL_SIZE(x) ((x) << 20)
-#define PQ_ATC_EN (1 << 23)
-#define PQ_VOLATILE (1 << 26)
-#define NO_UPDATE_RPTR (1 << 27)
-#define UNORD_DISPATCH (1 << 28)
-#define ROQ_PQ_IB_FLIP (1 << 29)
-#define PRIV_STATE (1 << 30)
-#define KMD_QUEUE (1 << 31)
-
#define DEFAULT_RPTR_BLOCK_SIZE RPTR_BLOCK_SIZE(5)
#define DEFAULT_MIN_AVAIL_SIZE MIN_AVAIL_SIZE(3)
-#define CP_HQD_IB_BASE_ADDR 0xC95Cu
-#define CP_HQD_IB_BASE_ADDR_HI 0xC960u
-#define CP_HQD_IB_RPTR 0xC964u
-#define CP_HQD_IB_CONTROL 0xC968u
-#define IB_ATC_EN (1U << 23)
-#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
-
-#define AQL_ENABLE 1
-
-#define CP_HQD_DEQUEUE_REQUEST 0xC974
-#define DEQUEUE_REQUEST_DRAIN 1
-#define DEQUEUE_REQUEST_RESET 2
-#define DEQUEUE_INT (1U << 8)
+#define PQ_ATC_EN (1 << 23)
+#define NO_UPDATE_RPTR (1 << 27)
-#define CP_HQD_SEMA_CMD 0xC97Cu
-#define CP_HQD_MSG_TYPE 0xC980u
-#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u
-#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u
-#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu
-#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u
-#define CP_HQD_HQ_SCHEDULER0 0xC994u
-#define CP_HQD_HQ_SCHEDULER1 0xC998u
+#define DOORBELL_OFFSET(x) ((x) << 2)
+#define DOORBELL_EN (1 << 30)
+#define PRIV_STATE (1 << 30)
+#define KMD_QUEUE (1 << 31)
-#define CP_MQD_CONTROL 0xC99C
-#define MQD_VMID(x) ((x) << 0)
-#define MQD_VMID_MASK (0xf << 0)
-#define MQD_CONTROL_PRIV_STATE_EN (1U << 8)
+#define AQL_ENABLE 1
#define SDMA_RB_VMID(x) (x << 24)
#define SDMA_RB_ENABLE (1 << 0)
@@ -202,33 +77,7 @@
#define SDMA_VA_SHARED_BASE(x) (x << 8)
#define GRBM_GFX_INDEX 0x30800
-#define INSTANCE_INDEX(x) ((x) << 0)
-#define SH_INDEX(x) ((x) << 8)
-#define SE_INDEX(x) ((x) << 16)
-#define SH_BROADCAST_WRITES (1 << 29)
-#define INSTANCE_BROADCAST_WRITES (1 << 30)
-#define SE_BROADCAST_WRITES (1 << 31)
-
-#define SQC_CACHES 0x30d20
-#define SQC_POLICY 0x8C38u
-#define SQC_VOLATILE 0x8C3Cu
-#define CP_PERFMON_CNTL 0x36020
-
-#define ATC_VMID0_PASID_MAPPING 0x339Cu
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u
#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
-#define ATC_VM_APERTURE0_CNTL 0x3310u
-#define ATS_ACCESS_MODE_NEVER 0
-#define ATS_ACCESS_MODE_ALWAYS 1
-
-#define ATC_VM_APERTURE0_CNTL2 0x3318u
-#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u
-#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u
-#define ATC_VM_APERTURE1_CNTL 0x3314u
-#define ATC_VM_APERTURE1_CNTL2 0x331Cu
-#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
-#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
-
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 19a4fba46e4e..c991973019d0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -35,6 +35,7 @@
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
+#include "kfd_dbgmgr.h"
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
@@ -289,8 +290,10 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
args->queue_id = queue_id;
+
/* Return gpu_id as doorbell offset for mmap usage */
- args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
+ args->doorbell_offset = (KFD_MMAP_DOORBELL_MASK | args->gpu_id);
+ args->doorbell_offset <<= PAGE_SHIFT;
mutex_unlock(&p->mutex);
@@ -430,6 +433,301 @@ out:
return err;
}
+static int kfd_ioctl_dbg_register(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_dbg_register_args *args = data;
+ struct kfd_dev *dev;
+ struct kfd_dbgmgr *dbgmgr_ptr;
+ struct kfd_process_device *pdd;
+ bool create_ok;
+ long status = 0;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(kfd_get_dbgmgr_mutex());
+ mutex_lock(&p->mutex);
+
+ /*
+ * make sure that we have a pdd, in case this is the first queue
+ * created for this process
+ */
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ mutex_unlock(&p->mutex);
+ mutex_unlock(kfd_get_dbgmgr_mutex());
+ return PTR_ERR(pdd);
+ }
+
+ if (dev->dbgmgr == NULL) {
+ /* In case of a legal call, we have no dbgmgr yet */
+ create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
+ if (create_ok) {
+ status = kfd_dbgmgr_register(dbgmgr_ptr, p);
+ if (status != 0)
+ kfd_dbgmgr_destroy(dbgmgr_ptr);
+ else
+ dev->dbgmgr = dbgmgr_ptr;
+ }
+ } else {
+ pr_debug("debugger already registered\n");
+ status = -EINVAL;
+ }
+
+ mutex_unlock(&p->mutex);
+ mutex_unlock(kfd_get_dbgmgr_mutex());
+
+ return status;
+}
+
+static int kfd_ioctl_dbg_unregister(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_dbg_unregister_args *args = data;
+ struct kfd_dev *dev;
+ long status;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ pr_debug("kfd_ioctl_dbg_unrgesiter not supported on CZ\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(kfd_get_dbgmgr_mutex());
+
+ status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
+ if (status == 0) {
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+ dev->dbgmgr = NULL;
+ }
+
+ mutex_unlock(kfd_get_dbgmgr_mutex());
+
+ return status;
+}
+
+/*
+ * Parse and generate a variable-size data structure for address watch.
+ * The total buffer size and the number of watch points are limited in
+ * order to prevent kernel abuse (this has no bearing on the much smaller
+ * HW limitation, which is enforced by the dbgdev module).
+ * Note also that the watch addresses themselves are not "copied from
+ * user", since they are programmed into the HW as user-mode values.
+ *
+ */
+static int kfd_ioctl_dbg_address_watch(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_dbg_address_watch_args *args = data;
+ struct kfd_dev *dev;
+ struct dbg_address_watch_info aw_info;
+ unsigned char *args_buff;
+ long status;
+ void __user *cmd_from_user;
+ uint64_t watch_mask_value = 0;
+ unsigned int args_idx = 0;
+
+ memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
+ return -EINVAL;
+ }
+
+ cmd_from_user = (void __user *) args->content_ptr;
+
+ /* Validate arguments */
+
+ if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
+ (args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
+ (cmd_from_user == NULL))
+ return -EINVAL;
+
+ /* this is the actual buffer to work with */
+
+ args_buff = kmalloc(args->buf_size_in_bytes -
+ sizeof(*args), GFP_KERNEL);
+ if (args_buff == NULL)
+ return -ENOMEM;
+
+ status = copy_from_user(args_buff, cmd_from_user,
+ args->buf_size_in_bytes - sizeof(*args));
+
+ if (status != 0) {
+ pr_debug("Failed to copy address watch user data\n");
+ kfree(args_buff);
+ return -EINVAL;
+ }
+
+ aw_info.process = p;
+
+ aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
+ args_idx += sizeof(aw_info.num_watch_points);
+
+ aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
+ args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;
+
+ /*
+ * set watch address base pointer to point on the array base
+ * within args_buff
+ */
+ aw_info.watch_address = (uint64_t *) &args_buff[args_idx];
+
+ /* skip over the addresses buffer */
+ args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
+
+ if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
+ kfree(args_buff);
+ return -EINVAL;
+ }
+
+ watch_mask_value = (uint64_t) args_buff[args_idx];
+
+ if (watch_mask_value > 0) {
+ /*
+ * There is an array of masks.
+ * set watch mask base pointer to point on the array base
+ * within args_buff
+ */
+ aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];
+
+ /* skip over the masks buffer */
+ args_idx += sizeof(aw_info.watch_mask) *
+ aw_info.num_watch_points;
+ } else {
+ /* just the NULL mask, set to NULL and skip over it */
+ aw_info.watch_mask = NULL;
+ args_idx += sizeof(aw_info.watch_mask);
+ }
+
+ if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
+ kfree(args_buff);
+ return -EINVAL;
+ }
+
+ /* Currently HSA Event is not supported for DBG */
+ aw_info.watch_event = NULL;
+
+ mutex_lock(kfd_get_dbgmgr_mutex());
+
+ status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);
+
+ mutex_unlock(kfd_get_dbgmgr_mutex());
+
+ kfree(args_buff);
+
+ return status;
+}
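
To make the parsing above easier to follow, here is a hedged sketch of how a user-space client could pack the variable-size buffer passed through args->content_ptr. The layout is inferred from the parser; the helper name is illustrative and not part of the uAPI, and the sketch assumes 4-byte enums and a 64-bit build:

	#include <stdint.h>
	#include <string.h>

	/* Hypothetical user-space packer for the address-watch payload. */
	static size_t pack_address_watch_buffer(unsigned char *buf, uint32_t n,
						const uint32_t *modes,
						const uint64_t *addrs)
	{
		size_t off = 0;
		uint64_t no_masks = 0;

		memcpy(buf + off, &n, sizeof(n));		/* num_watch_points */
		off += sizeof(n);
		memcpy(buf + off, modes, n * sizeof(uint32_t));	/* HSA_DBG_WATCH_MODE[] */
		off += n * sizeof(uint32_t);
		memcpy(buf + off, addrs, n * sizeof(uint64_t));	/* watch addresses */
		off += n * sizeof(uint64_t);
		memcpy(buf + off, &no_masks, sizeof(no_masks));	/* zero => no mask array */
		off += sizeof(no_masks);

		/*
		 * Caller passes buf_size_in_bytes = off + sizeof(ioctl args)
		 * plus a little slack, since the parser requires its running
		 * index to stay strictly inside the payload.
		 */
		return off;
	}
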
+
+/* Parse and generate fixed size data structure for wave control */
+static int kfd_ioctl_dbg_wave_control(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_dbg_wave_control_args *args = data;
+ struct kfd_dev *dev;
+ struct dbg_wave_control_info wac_info;
+ unsigned char *args_buff;
+ uint32_t computed_buff_size;
+ long status;
+ void __user *cmd_from_user;
+ unsigned int args_idx = 0;
+
+ memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));
+
+ /* we use compact form, independent of the packing attribute value */
+ computed_buff_size = sizeof(*args) +
+ sizeof(wac_info.mode) +
+ sizeof(wac_info.operand) +
+ sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
+ sizeof(wac_info.dbgWave_msg.MemoryVA) +
+ sizeof(wac_info.trapId);
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ if (dev->device_info->asic_family == CHIP_CARRIZO) {
+ pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
+ return -EINVAL;
+ }
+
+ /* input size must match the computed "compact" size */
+ if (args->buf_size_in_bytes != computed_buff_size) {
+ pr_debug("size mismatch, computed : actual %u : %u\n",
+ args->buf_size_in_bytes, computed_buff_size);
+ return -EINVAL;
+ }
+
+ cmd_from_user = (void __user *) args->content_ptr;
+
+ if (cmd_from_user == NULL)
+ return -EINVAL;
+
+ /* this is the actual buffer to work with */
+
+ args_buff = kmalloc(args->buf_size_in_bytes - sizeof(*args),
+ GFP_KERNEL);
+
+ if (args_buff == NULL)
+ return -ENOMEM;
+
+ /* Now copy the entire buffer from user */
+ status = copy_from_user(args_buff, cmd_from_user,
+ args->buf_size_in_bytes - sizeof(*args));
+ if (status != 0) {
+ pr_debug("Failed to copy wave control user data\n");
+ kfree(args_buff);
+ return -EINVAL;
+ }
+
+ /* move ptr to the start of the "pay-load" area */
+ wac_info.process = p;
+
+ wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
+ args_idx += sizeof(wac_info.operand);
+
+ wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
+ args_idx += sizeof(wac_info.mode);
+
+ wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
+ args_idx += sizeof(wac_info.trapId);
+
+ wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
+ *((uint32_t *)(&args_buff[args_idx]));
+ wac_info.dbgWave_msg.MemoryVA = NULL;
+
+ mutex_lock(kfd_get_dbgmgr_mutex());
+
+ pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
+ wac_info.process, wac_info.operand,
+ wac_info.mode, wac_info.trapId,
+ wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
+
+ status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);
+
+ pr_debug("Returned status of dbg manager is %ld\n", status);
+
+ mutex_unlock(kfd_get_dbgmgr_mutex());
+
+ kfree(args_buff);
+
+ return status;
+}
+
static int kfd_ioctl_get_clock_counters(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -514,6 +812,62 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
return 0;
}
+static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
+ void *data)
+{
+ struct kfd_ioctl_create_event_args *args = data;
+ int err;
+
+ err = kfd_event_create(filp, p, args->event_type,
+ args->auto_reset != 0, args->node_id,
+ &args->event_id, &args->event_trigger_data,
+ &args->event_page_offset,
+ &args->event_slot_index);
+
+ return err;
+}
+
+static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
+ void *data)
+{
+ struct kfd_ioctl_destroy_event_args *args = data;
+
+ return kfd_event_destroy(p, args->event_id);
+}
+
+static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
+ void *data)
+{
+ struct kfd_ioctl_set_event_args *args = data;
+
+ return kfd_set_event(p, args->event_id);
+}
+
+static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
+ void *data)
+{
+ struct kfd_ioctl_reset_event_args *args = data;
+
+ return kfd_reset_event(p, args->event_id);
+}
+
+static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
+ void *data)
+{
+ struct kfd_ioctl_wait_events_args *args = data;
+ enum kfd_event_wait_result wait_result;
+ int err;
+
+ err = kfd_wait_on_events(p, args->num_events,
+ (void __user *)args->events_ptr,
+ (args->wait_for_all != 0),
+ args->timeout, &wait_result);
+
+ args->wait_result = wait_result;
+
+ return err;
+}
+
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
@@ -539,6 +893,33 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
kfd_ioctl_update_queue, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
+ kfd_ioctl_create_event, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
+ kfd_ioctl_destroy_event, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
+ kfd_ioctl_set_event, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
+ kfd_ioctl_reset_event, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
+ kfd_ioctl_wait_events, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
+ kfd_ioctl_dbg_register, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
+ kfd_ioctl_dbg_unregister, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
+ kfd_ioctl_dbg_address_watch, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
+ kfd_ioctl_dbg_wave_control, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
@@ -639,5 +1020,15 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
if (IS_ERR(process))
return PTR_ERR(process);
- return kfd_doorbell_mmap(process, vma);
+ if ((vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK) ==
+ KFD_MMAP_DOORBELL_MASK) {
+ vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_DOORBELL_MASK;
+ return kfd_doorbell_mmap(process, vma);
+ } else if ((vma->vm_pgoff & KFD_MMAP_EVENTS_MASK) ==
+ KFD_MMAP_EVENTS_MASK) {
+ vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_EVENTS_MASK;
+ return kfd_event_mmap(process, vma);
+ }
+
+ return -EFAULT;
}
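
The doorbell offset returned by AMDKFD_IOC_CREATE_QUEUE and the event page offset returned by AMDKFD_IOC_CREATE_EVENT now carry distinct marker bits (KFD_MMAP_DOORBELL_MASK / KFD_MMAP_EVENTS_MASK, shifted into the page offset), which is what lets kfd_mmap() above route the mapping. A hedged user-space sketch of how those encoded offsets are meant to be consumed; error handling is trimmed and the page size argument is illustrative:

	#include <stdint.h>
	#include <stddef.h>
	#include <sys/mman.h>

	/*
	 * Hypothetical helper: maps the doorbell page and the event page for
	 * an already-created queue/event, passing the offsets returned by the
	 * ioctls to mmap() verbatim.
	 */
	static int map_kfd_pages(int kfd_fd, uint64_t doorbell_offset,
				 uint64_t event_page_offset, size_t page_size,
				 void **doorbells, void **event_page)
	{
		*doorbells = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
				  MAP_SHARED, kfd_fd, doorbell_offset);
		*event_page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, kfd_fd, event_page_offset);

		return (*doorbells == MAP_FAILED ||
			*event_page == MAP_FAILED) ? -1 : 0;
	}
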
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
new file mode 100644
index 000000000000..c34c393e9aea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+#include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_diq.h"
+#include "kfd_kernel_queue.h"
+#include "kfd_priv.h"
+#include "kfd_pm4_opcodes.h"
+#include "cik_regs.h"
+#include "kfd_dbgmgr.h"
+#include "kfd_dbgdev.h"
+#include "kfd_device_queue_manager.h"
+#include "../../radeon/cik_reg.h"
+
+static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
+{
+ BUG_ON(!dev || !dev->kfd2kgd);
+
+ dev->kfd2kgd->address_watch_disable(dev->kgd);
+}
+
+static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
+ unsigned int pasid, uint64_t vmid0_address,
+ uint32_t *packet_buff, size_t size_in_bytes)
+{
+ struct pm4__release_mem *rm_packet;
+ struct pm4__indirect_buffer_pasid *ib_packet;
+ struct kfd_mem_obj *mem_obj;
+ size_t pq_packets_size_in_bytes;
+ union ULARGE_INTEGER *largep;
+ union ULARGE_INTEGER addr;
+ struct kernel_queue *kq;
+ uint64_t *rm_state;
+ unsigned int *ib_packet_buff;
+ int status;
+
+ BUG_ON(!dbgdev || !dbgdev->kq || !packet_buff || !size_in_bytes);
+
+ kq = dbgdev->kq;
+
+ pq_packets_size_in_bytes = sizeof(struct pm4__release_mem) +
+ sizeof(struct pm4__indirect_buffer_pasid);
+
+ /*
+ * We acquire a buffer from the DIQ.
+ * The received packet buffer will sit in the indirect buffer,
+ * and in the PQ we put the IB packet + sync packet(s).
+ */
+ status = kq->ops.acquire_packet_buffer(kq,
+ pq_packets_size_in_bytes / sizeof(uint32_t),
+ &ib_packet_buff);
+ if (status != 0) {
+ pr_err("amdkfd: acquire_packet_buffer failed\n");
+ return status;
+ }
+
+ memset(ib_packet_buff, 0, pq_packets_size_in_bytes);
+
+ ib_packet = (struct pm4__indirect_buffer_pasid *) (ib_packet_buff);
+
+ ib_packet->header.count = 3;
+ ib_packet->header.opcode = IT_INDIRECT_BUFFER_PASID;
+ ib_packet->header.type = PM4_TYPE_3;
+
+ largep = (union ULARGE_INTEGER *) &vmid0_address;
+
+ ib_packet->bitfields2.ib_base_lo = largep->u.low_part >> 2;
+ ib_packet->bitfields3.ib_base_hi = largep->u.high_part;
+
+ ib_packet->control = (1 << 23) | (1 << 31) |
+ ((size_in_bytes / sizeof(uint32_t)) & 0xfffff);
+
+ ib_packet->bitfields5.pasid = pasid;
+
+ /*
+ * For now we use RELEASE_MEM for GPU-CPU synchronization.
+ * Consider WAIT_REG_MEM + WRITE_DATA as a better alternative.
+ * We get a GART allocation (gpu/cpu mapping) for the sync variable,
+ * and wait until:
+ * (a) Sync with HW
+ * (b) Sync var is written by CP to mem.
+ */
+ rm_packet = (struct pm4__release_mem *) (ib_packet_buff +
+ (sizeof(struct pm4__indirect_buffer_pasid) /
+ sizeof(unsigned int)));
+
+ status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t),
+ &mem_obj);
+
+ if (status != 0) {
+ pr_err("amdkfd: Failed to allocate GART memory\n");
+ kq->ops.rollback_packet(kq);
+ return status;
+ }
+
+ rm_state = (uint64_t *) mem_obj->cpu_ptr;
+
+ *rm_state = QUEUESTATE__ACTIVE_COMPLETION_PENDING;
+
+ rm_packet->header.opcode = IT_RELEASE_MEM;
+ rm_packet->header.type = PM4_TYPE_3;
+ rm_packet->header.count = sizeof(struct pm4__release_mem) /
+ sizeof(unsigned int) - 2;
+
+ rm_packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
+ rm_packet->bitfields2.event_index =
+ event_index___release_mem__end_of_pipe;
+
+ rm_packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
+ rm_packet->bitfields2.atc = 0;
+ rm_packet->bitfields2.tc_wb_action_ena = 1;
+
+ addr.quad_part = mem_obj->gpu_addr;
+
+ rm_packet->bitfields4.address_lo_32b = addr.u.low_part >> 2;
+ rm_packet->address_hi = addr.u.high_part;
+
+ rm_packet->bitfields3.data_sel =
+ data_sel___release_mem__send_64_bit_data;
+
+ rm_packet->bitfields3.int_sel =
+ int_sel___release_mem__send_data_after_write_confirm;
+
+ rm_packet->bitfields3.dst_sel =
+ dst_sel___release_mem__memory_controller;
+
+ rm_packet->data_lo = QUEUESTATE__ACTIVE;
+
+ kq->ops.submit_packet(kq);
+
+ /* Wait till CP writes sync code: */
+ status = amdkfd_fence_wait_timeout(
+ (unsigned int *) rm_state,
+ QUEUESTATE__ACTIVE, 1500);
+
+ kfd_gtt_sa_free(dbgdev->dev, mem_obj);
+
+ return status;
+}
+
+static int dbgdev_register_nodiq(struct kfd_dbgdev *dbgdev)
+{
+ BUG_ON(!dbgdev);
+
+ /*
+ * no action is needed in this case,
+ * just make sure diq will not be used
+ */
+
+ dbgdev->kq = NULL;
+
+ return 0;
+}
+
+static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
+{
+ struct queue_properties properties;
+ unsigned int qid;
+ struct kernel_queue *kq = NULL;
+ int status;
+
+ BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->dev);
+
+ status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
+ &properties, 0, KFD_QUEUE_TYPE_DIQ,
+ &qid);
+
+ if (status) {
+ pr_err("amdkfd: Failed to create DIQ\n");
+ return status;
+ }
+
+ pr_debug("DIQ Created with queue id: %d\n", qid);
+
+ kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
+
+ if (kq == NULL) {
+ pr_err("amdkfd: Error getting DIQ\n");
+ pqm_destroy_queue(dbgdev->pqm, qid);
+ return -EFAULT;
+ }
+
+ dbgdev->kq = kq;
+
+ return status;
+}
+
+static int dbgdev_unregister_nodiq(struct kfd_dbgdev *dbgdev)
+{
+ BUG_ON(!dbgdev || !dbgdev->dev);
+
+ /* disable watch address */
+ dbgdev_address_watch_disable_nodiq(dbgdev->dev);
+ return 0;
+}
+
+static int dbgdev_unregister_diq(struct kfd_dbgdev *dbgdev)
+{
+ /* todo - disable address watch */
+ int status;
+
+ BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->kq);
+
+ status = pqm_destroy_queue(dbgdev->pqm,
+ dbgdev->kq->queue->properties.queue_id);
+ dbgdev->kq = NULL;
+
+ return status;
+}
+
+static void dbgdev_address_watch_set_registers(
+ const struct dbg_address_watch_info *adw_info,
+ union TCP_WATCH_ADDR_H_BITS *addrHi,
+ union TCP_WATCH_ADDR_L_BITS *addrLo,
+ union TCP_WATCH_CNTL_BITS *cntl,
+ unsigned int index, unsigned int vmid)
+{
+ union ULARGE_INTEGER addr;
+
+ BUG_ON(!adw_info || !addrHi || !addrLo || !cntl);
+
+ addr.quad_part = 0;
+ addrHi->u32All = 0;
+ addrLo->u32All = 0;
+ cntl->u32All = 0;
+
+ if (adw_info->watch_mask != NULL)
+ cntl->bitfields.mask =
+ (uint32_t) (adw_info->watch_mask[index] &
+ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK);
+ else
+ cntl->bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
+
+ addr.quad_part = (unsigned long long) adw_info->watch_address[index];
+
+ addrHi->bitfields.addr = addr.u.high_part &
+ ADDRESS_WATCH_REG_ADDHIGH_MASK;
+ addrLo->bitfields.addr =
+ (addr.u.low_part >> ADDRESS_WATCH_REG_ADDLOW_SHIFT);
+
+ cntl->bitfields.mode = adw_info->watch_mode[index];
+ cntl->bitfields.vmid = (uint32_t) vmid;
+ /* for now assume it is an ATC address */
+ cntl->u32All |= ADDRESS_WATCH_REG_CNTL_ATC_BIT;
+
+ pr_debug("\t\t%20s %08x\n", "set reg mask :", cntl->bitfields.mask);
+ pr_debug("\t\t%20s %08x\n", "set reg add high :",
+ addrHi->bitfields.addr);
+ pr_debug("\t\t%20s %08x\n", "set reg add low :",
+ addrLo->bitfields.addr);
+}
+
+static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
+ struct dbg_address_watch_info *adw_info)
+{
+ union TCP_WATCH_ADDR_H_BITS addrHi;
+ union TCP_WATCH_ADDR_L_BITS addrLo;
+ union TCP_WATCH_CNTL_BITS cntl;
+ struct kfd_process_device *pdd;
+ unsigned int i;
+
+ BUG_ON(!dbgdev || !dbgdev->dev || !adw_info);
+
+ /* take the vmid for that process the safe way, using the pdd */
+ pdd = kfd_get_process_device_data(dbgdev->dev,
+ adw_info->process);
+ if (!pdd) {
+ pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+ return -EFAULT;
+ }
+
+ addrHi.u32All = 0;
+ addrLo.u32All = 0;
+ cntl.u32All = 0;
+
+ if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
+ (adw_info->num_watch_points == 0)) {
+ pr_err("amdkfd: num_watch_points is invalid\n");
+ return -EINVAL;
+ }
+
+ if ((adw_info->watch_mode == NULL) ||
+ (adw_info->watch_address == NULL)) {
+ pr_err("amdkfd: adw_info fields are not valid\n");
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < adw_info->num_watch_points ; i++) {
+ dbgdev_address_watch_set_registers(adw_info, &addrHi, &addrLo,
+ &cntl, i, pdd->qpd.vmid);
+
+ pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
+ pr_debug("\t\t%20s %08x\n", "register index :", i);
+ pr_debug("\t\t%20s %08x\n", "vmid is :", pdd->qpd.vmid);
+ pr_debug("\t\t%20s %08x\n", "Address Low is :",
+ addrLo.bitfields.addr);
+ pr_debug("\t\t%20s %08x\n", "Address high is :",
+ addrHi.bitfields.addr);
+ pr_debug("\t\t%20s %08x\n", "Address high is :",
+ addrHi.bitfields.addr);
+ pr_debug("\t\t%20s %08x\n", "Control Mask is :",
+ cntl.bitfields.mask);
+ pr_debug("\t\t%20s %08x\n", "Control Mode is :",
+ cntl.bitfields.mode);
+ pr_debug("\t\t%20s %08x\n", "Control Vmid is :",
+ cntl.bitfields.vmid);
+ pr_debug("\t\t%20s %08x\n", "Control atc is :",
+ cntl.bitfields.atc);
+ pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
+
+ pdd->dev->kfd2kgd->address_watch_execute(
+ dbgdev->dev->kgd,
+ i,
+ cntl.u32All,
+ addrHi.u32All,
+ addrLo.u32All);
+ }
+
+ return 0;
+}
+
+static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
+ struct dbg_address_watch_info *adw_info)
+{
+ struct pm4__set_config_reg *packets_vec;
+ union TCP_WATCH_ADDR_H_BITS addrHi;
+ union TCP_WATCH_ADDR_L_BITS addrLo;
+ union TCP_WATCH_CNTL_BITS cntl;
+ struct kfd_mem_obj *mem_obj;
+ unsigned int aw_reg_add_dword;
+ uint32_t *packet_buff_uint;
+ unsigned int i;
+ int status;
+ size_t ib_size = sizeof(struct pm4__set_config_reg) * 4;
+ /* we do not control the vmid in DIQ mode, just a place holder */
+ unsigned int vmid = 0;
+
+ BUG_ON(!dbgdev || !dbgdev->dev || !adw_info);
+
+ addrHi.u32All = 0;
+ addrLo.u32All = 0;
+ cntl.u32All = 0;
+
+ if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
+ (adw_info->num_watch_points == 0)) {
+ pr_err("amdkfd: num_watch_points is invalid\n");
+ return -EINVAL;
+ }
+
+ if ((NULL == adw_info->watch_mode) ||
+ (NULL == adw_info->watch_address)) {
+ pr_err("amdkfd: adw_info fields are not valid\n");
+ return -EINVAL;
+ }
+
+ status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
+
+ if (status != 0) {
+ pr_err("amdkfd: Failed to allocate GART memory\n");
+ return status;
+ }
+
+ packet_buff_uint = mem_obj->cpu_ptr;
+
+ memset(packet_buff_uint, 0, ib_size);
+
+ packets_vec = (struct pm4__set_config_reg *) (packet_buff_uint);
+
+ packets_vec[0].header.count = 1;
+ packets_vec[0].header.opcode = IT_SET_CONFIG_REG;
+ packets_vec[0].header.type = PM4_TYPE_3;
+ packets_vec[0].bitfields2.vmid_shift = ADDRESS_WATCH_CNTL_OFFSET;
+ packets_vec[0].bitfields2.insert_vmid = 1;
+ packets_vec[1].ordinal1 = packets_vec[0].ordinal1;
+ packets_vec[1].bitfields2.insert_vmid = 0;
+ packets_vec[2].ordinal1 = packets_vec[0].ordinal1;
+ packets_vec[2].bitfields2.insert_vmid = 0;
+ packets_vec[3].ordinal1 = packets_vec[0].ordinal1;
+ packets_vec[3].bitfields2.vmid_shift = ADDRESS_WATCH_CNTL_OFFSET;
+ packets_vec[3].bitfields2.insert_vmid = 1;
+
+ for (i = 0; i < adw_info->num_watch_points; i++) {
+ dbgdev_address_watch_set_registers(adw_info,
+ &addrHi,
+ &addrLo,
+ &cntl,
+ i,
+ vmid);
+
+ pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
+ pr_debug("\t\t%20s %08x\n", "register index :", i);
+ pr_debug("\t\t%20s %08x\n", "vmid is :", vmid);
+ pr_debug("\t\t%20s %p\n", "Add ptr is :",
+ adw_info->watch_address);
+ pr_debug("\t\t%20s %08llx\n", "Add is :",
+ adw_info->watch_address[i]);
+ pr_debug("\t\t%20s %08x\n", "Address Low is :",
+ addrLo.bitfields.addr);
+ pr_debug("\t\t%20s %08x\n", "Address high is :",
+ addrHi.bitfields.addr);
+ pr_debug("\t\t%20s %08x\n", "Control Mask is :",
+ cntl.bitfields.mask);
+ pr_debug("\t\t%20s %08x\n", "Control Mode is :",
+ cntl.bitfields.mode);
+ pr_debug("\t\t%20s %08x\n", "Control Vmid is :",
+ cntl.bitfields.vmid);
+ pr_debug("\t\t%20s %08x\n", "Control atc is :",
+ cntl.bitfields.atc);
+ pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
+
+ aw_reg_add_dword =
+ dbgdev->dev->kfd2kgd->address_watch_get_offset(
+ dbgdev->dev->kgd,
+ i,
+ ADDRESS_WATCH_REG_CNTL);
+
+ aw_reg_add_dword /= sizeof(uint32_t);
+
+ packets_vec[0].bitfields2.reg_offset =
+ aw_reg_add_dword - AMD_CONFIG_REG_BASE;
+
+ packets_vec[0].reg_data[0] = cntl.u32All;
+
+ aw_reg_add_dword =
+ dbgdev->dev->kfd2kgd->address_watch_get_offset(
+ dbgdev->dev->kgd,
+ i,
+ ADDRESS_WATCH_REG_ADDR_HI);
+
+ aw_reg_add_dword /= sizeof(uint32_t);
+
+ packets_vec[1].bitfields2.reg_offset =
+ aw_reg_add_dword - AMD_CONFIG_REG_BASE;
+ packets_vec[1].reg_data[0] = addrHi.u32All;
+
+ aw_reg_add_dword =
+ dbgdev->dev->kfd2kgd->address_watch_get_offset(
+ dbgdev->dev->kgd,
+ i,
+ ADDRESS_WATCH_REG_ADDR_LO);
+
+ aw_reg_add_dword /= sizeof(uint32_t);
+
+ packets_vec[2].bitfields2.reg_offset =
+ aw_reg_add_dword - AMD_CONFIG_REG_BASE;
+ packets_vec[2].reg_data[0] = addrLo.u32All;
+
+ /* enable watch flag if address is not zero*/
+ if (adw_info->watch_address[i] > 0)
+ cntl.bitfields.valid = 1;
+ else
+ cntl.bitfields.valid = 0;
+
+ aw_reg_add_dword =
+ dbgdev->dev->kfd2kgd->address_watch_get_offset(
+ dbgdev->dev->kgd,
+ i,
+ ADDRESS_WATCH_REG_CNTL);
+
+ aw_reg_add_dword /= sizeof(uint32_t);
+
+ packets_vec[3].bitfields2.reg_offset =
+ aw_reg_add_dword - AMD_CONFIG_REG_BASE;
+ packets_vec[3].reg_data[0] = cntl.u32All;
+
+ status = dbgdev_diq_submit_ib(
+ dbgdev,
+ adw_info->process->pasid,
+ mem_obj->gpu_addr,
+ packet_buff_uint,
+ ib_size);
+
+ if (status != 0) {
+ pr_err("amdkfd: Failed to submit IB to DIQ\n");
+ break;
+ }
+ }
+
+ kfd_gtt_sa_free(dbgdev->dev, mem_obj);
+ return status;
+}
+
+static int dbgdev_wave_control_set_registers(
+ struct dbg_wave_control_info *wac_info,
+ union SQ_CMD_BITS *in_reg_sq_cmd,
+ union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
+{
+ int status = 0;
+ union SQ_CMD_BITS reg_sq_cmd;
+ union GRBM_GFX_INDEX_BITS reg_gfx_index;
+ struct HsaDbgWaveMsgAMDGen2 *pMsg;
+
+ BUG_ON(!wac_info || !in_reg_sq_cmd || !in_reg_gfx_index);
+
+ reg_sq_cmd.u32All = 0;
+ reg_gfx_index.u32All = 0;
+ pMsg = &wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2;
+
+ switch (wac_info->mode) {
+ /* Send command to single wave */
+ case HSA_DBG_WAVEMODE_SINGLE:
+ /*
+ * Limit access to the process waves only,
+ * by setting vmid check
+ */
+ reg_sq_cmd.bits.check_vmid = 1;
+ reg_sq_cmd.bits.simd_id = pMsg->ui32.SIMD;
+ reg_sq_cmd.bits.wave_id = pMsg->ui32.WaveId;
+ reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_SINGLE;
+
+ reg_gfx_index.bits.sh_index = pMsg->ui32.ShaderArray;
+ reg_gfx_index.bits.se_index = pMsg->ui32.ShaderEngine;
+ reg_gfx_index.bits.instance_index = pMsg->ui32.HSACU;
+
+ break;
+
+ /* Send command to all waves with matching VMID */
+ case HSA_DBG_WAVEMODE_BROADCAST_PROCESS:
+
+ reg_gfx_index.bits.sh_broadcast_writes = 1;
+ reg_gfx_index.bits.se_broadcast_writes = 1;
+ reg_gfx_index.bits.instance_broadcast_writes = 1;
+
+ reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
+
+ break;
+
+ /* Send command to all CU waves with matching VMID */
+ case HSA_DBG_WAVEMODE_BROADCAST_PROCESS_CU:
+
+ reg_sq_cmd.bits.check_vmid = 1;
+ reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
+
+ reg_gfx_index.bits.sh_index = pMsg->ui32.ShaderArray;
+ reg_gfx_index.bits.se_index = pMsg->ui32.ShaderEngine;
+ reg_gfx_index.bits.instance_index = pMsg->ui32.HSACU;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (wac_info->operand) {
+ case HSA_DBG_WAVEOP_HALT:
+ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_HALT;
+ break;
+
+ case HSA_DBG_WAVEOP_RESUME:
+ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_RESUME;
+ break;
+
+ case HSA_DBG_WAVEOP_KILL:
+ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
+ break;
+
+ case HSA_DBG_WAVEOP_DEBUG:
+ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_DEBUG;
+ break;
+
+ case HSA_DBG_WAVEOP_TRAP:
+ if (wac_info->trapId < MAX_TRAPID) {
+ reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_TRAP;
+ reg_sq_cmd.bits.trap_id = wac_info->trapId;
+ } else {
+ status = -EINVAL;
+ }
+ break;
+
+ default:
+ status = -EINVAL;
+ break;
+ }
+
+ if (status == 0) {
+ *in_reg_sq_cmd = reg_sq_cmd;
+ *in_reg_gfx_index = reg_gfx_index;
+ }
+
+ return status;
+}
+
+static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
+ struct dbg_wave_control_info *wac_info)
+{
+
+ int status;
+ union SQ_CMD_BITS reg_sq_cmd;
+ union GRBM_GFX_INDEX_BITS reg_gfx_index;
+ struct kfd_mem_obj *mem_obj;
+ uint32_t *packet_buff_uint;
+ struct pm4__set_config_reg *packets_vec;
+ size_t ib_size = sizeof(struct pm4__set_config_reg) * 3;
+
+ BUG_ON(!dbgdev || !wac_info);
+
+ reg_sq_cmd.u32All = 0;
+
+ status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
+ &reg_gfx_index);
+ if (status) {
+ pr_err("amdkfd: Failed to set wave control registers\n");
+ return status;
+ }
+
+ /* we do not control the VMID in DIQ, so reset it to a known value */
+ reg_sq_cmd.bits.vm_id = 0;
+
+ pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
+
+ pr_debug("\t\t mode is: %u\n", wac_info->mode);
+ pr_debug("\t\t operand is: %u\n", wac_info->operand);
+ pr_debug("\t\t trap id is: %u\n", wac_info->trapId);
+ pr_debug("\t\t msg value is: %u\n",
+ wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
+ pr_debug("\t\t vmid is: N/A\n");
+
+ pr_debug("\t\t chk_vmid is : %u\n", reg_sq_cmd.bitfields.check_vmid);
+ pr_debug("\t\t command is : %u\n", reg_sq_cmd.bitfields.cmd);
+ pr_debug("\t\t queue id is : %u\n", reg_sq_cmd.bitfields.queue_id);
+ pr_debug("\t\t simd id is : %u\n", reg_sq_cmd.bitfields.simd_id);
+ pr_debug("\t\t mode is : %u\n", reg_sq_cmd.bitfields.mode);
+ pr_debug("\t\t vm_id is : %u\n", reg_sq_cmd.bitfields.vm_id);
+ pr_debug("\t\t wave_id is : %u\n", reg_sq_cmd.bitfields.wave_id);
+
+ pr_debug("\t\t ibw is : %u\n",
+ reg_gfx_index.bitfields.instance_broadcast_writes);
+ pr_debug("\t\t ii is : %u\n",
+ reg_gfx_index.bitfields.instance_index);
+ pr_debug("\t\t sebw is : %u\n",
+ reg_gfx_index.bitfields.se_broadcast_writes);
+ pr_debug("\t\t se_ind is : %u\n", reg_gfx_index.bitfields.se_index);
+ pr_debug("\t\t sh_ind is : %u\n", reg_gfx_index.bitfields.sh_index);
+ pr_debug("\t\t sbw is : %u\n",
+ reg_gfx_index.bitfields.sh_broadcast_writes);
+
+ pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
+
+ status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
+
+ if (status != 0) {
+ pr_err("amdkfd: Failed to allocate GART memory\n");
+ return status;
+ }
+
+ packet_buff_uint = mem_obj->cpu_ptr;
+
+ memset(packet_buff_uint, 0, ib_size);
+
+ packets_vec = (struct pm4__set_config_reg *) packet_buff_uint;
+ packets_vec[0].header.count = 1;
+ packets_vec[0].header.opcode = IT_SET_UCONFIG_REG;
+ packets_vec[0].header.type = PM4_TYPE_3;
+ packets_vec[0].bitfields2.reg_offset =
+ GRBM_GFX_INDEX / (sizeof(uint32_t)) -
+ USERCONFIG_REG_BASE;
+
+ packets_vec[0].bitfields2.insert_vmid = 0;
+ packets_vec[0].reg_data[0] = reg_gfx_index.u32All;
+
+ packets_vec[1].header.count = 1;
+ packets_vec[1].header.opcode = IT_SET_CONFIG_REG;
+ packets_vec[1].header.type = PM4_TYPE_3;
+ packets_vec[1].bitfields2.reg_offset = SQ_CMD / (sizeof(uint32_t)) -
+ AMD_CONFIG_REG_BASE;
+
+ packets_vec[1].bitfields2.vmid_shift = SQ_CMD_VMID_OFFSET;
+ packets_vec[1].bitfields2.insert_vmid = 1;
+ packets_vec[1].reg_data[0] = reg_sq_cmd.u32All;
+
+ /* Restore the GRBM_GFX_INDEX register */
+
+ reg_gfx_index.u32All = 0;
+ reg_gfx_index.bits.sh_broadcast_writes = 1;
+ reg_gfx_index.bits.instance_broadcast_writes = 1;
+ reg_gfx_index.bits.se_broadcast_writes = 1;
+
+
+ packets_vec[2].ordinal1 = packets_vec[0].ordinal1;
+ packets_vec[2].bitfields2.reg_offset =
+ GRBM_GFX_INDEX / (sizeof(uint32_t)) -
+ USERCONFIG_REG_BASE;
+
+ packets_vec[2].bitfields2.insert_vmid = 0;
+ packets_vec[2].reg_data[0] = reg_gfx_index.u32All;
+
+ status = dbgdev_diq_submit_ib(
+ dbgdev,
+ wac_info->process->pasid,
+ mem_obj->gpu_addr,
+ packet_buff_uint,
+ ib_size);
+
+ if (status != 0)
+ pr_err("amdkfd: Failed to submit IB to DIQ\n");
+
+ kfd_gtt_sa_free(dbgdev->dev, mem_obj);
+
+ return status;
+}
+
+static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
+ struct dbg_wave_control_info *wac_info)
+{
+ int status;
+ union SQ_CMD_BITS reg_sq_cmd;
+ union GRBM_GFX_INDEX_BITS reg_gfx_index;
+ struct kfd_process_device *pdd;
+
+ BUG_ON(!dbgdev || !dbgdev->dev || !wac_info);
+
+ reg_sq_cmd.u32All = 0;
+
+ /* take the VMID for that process the safe way, using the pdd */
+ pdd = kfd_get_process_device_data(dbgdev->dev, wac_info->process);
+
+ if (!pdd) {
+ pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+ return -EFAULT;
+ }
+ status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
+ &reg_gfx_index);
+ if (status) {
+ pr_err("amdkfd: Failed to set wave control registers\n");
+ return status;
+ }
+
+ /* for non DIQ we need to patch the VMID: */
+
+ reg_sq_cmd.bits.vm_id = pdd->qpd.vmid;
+
+ pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
+
+ pr_debug("\t\t mode is: %u\n", wac_info->mode);
+ pr_debug("\t\t operand is: %u\n", wac_info->operand);
+ pr_debug("\t\t trap id is: %u\n", wac_info->trapId);
+ pr_debug("\t\t msg value is: %u\n",
+ wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
+ pr_debug("\t\t vmid is: %u\n", pdd->qpd.vmid);
+
+ pr_debug("\t\t chk_vmid is : %u\n", reg_sq_cmd.bitfields.check_vmid);
+ pr_debug("\t\t command is : %u\n", reg_sq_cmd.bitfields.cmd);
+ pr_debug("\t\t queue id is : %u\n", reg_sq_cmd.bitfields.queue_id);
+ pr_debug("\t\t simd id is : %u\n", reg_sq_cmd.bitfields.simd_id);
+ pr_debug("\t\t mode is : %u\n", reg_sq_cmd.bitfields.mode);
+ pr_debug("\t\t vm_id is : %u\n", reg_sq_cmd.bitfields.vm_id);
+ pr_debug("\t\t wave_id is : %u\n", reg_sq_cmd.bitfields.wave_id);
+
+ pr_debug("\t\t ibw is : %u\n",
+ reg_gfx_index.bitfields.instance_broadcast_writes);
+ pr_debug("\t\t ii is : %u\n",
+ reg_gfx_index.bitfields.instance_index);
+ pr_debug("\t\t sebw is : %u\n",
+ reg_gfx_index.bitfields.se_broadcast_writes);
+ pr_debug("\t\t se_ind is : %u\n", reg_gfx_index.bitfields.se_index);
+ pr_debug("\t\t sh_ind is : %u\n", reg_gfx_index.bitfields.sh_index);
+ pr_debug("\t\t sbw is : %u\n",
+ reg_gfx_index.bitfields.sh_broadcast_writes);
+
+ pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
+
+ return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->kgd,
+ reg_gfx_index.u32All,
+ reg_sq_cmd.u32All);
+}
+
+int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
+{
+ int status = 0;
+ unsigned int vmid;
+ union SQ_CMD_BITS reg_sq_cmd;
+ union GRBM_GFX_INDEX_BITS reg_gfx_index;
+ struct kfd_process_device *pdd;
+ struct dbg_wave_control_info wac_info;
+ int temp;
+ int first_vmid_to_scan = 8;
+ int last_vmid_to_scan = 15;
+
+ first_vmid_to_scan = ffs(dev->shared_resources.compute_vmid_bitmap) - 1;
+ temp = dev->shared_resources.compute_vmid_bitmap >> first_vmid_to_scan;
+ last_vmid_to_scan = first_vmid_to_scan + ffz(temp);
+
+ reg_sq_cmd.u32All = 0;
+ status = 0;
+
+ wac_info.mode = HSA_DBG_WAVEMODE_BROADCAST_PROCESS;
+ wac_info.operand = HSA_DBG_WAVEOP_KILL;
+
+ pr_debug("Killing all process wavefronts\n");
+
+ /* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
+ * ATC_VMID15_PASID_MAPPING
+ * to check which VMID the current process is mapped to. */
+
+ for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
+ if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
+ (dev->kgd, vmid)) {
+ if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
+ (dev->kgd, vmid) == p->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
+ vmid, p->pasid);
+ break;
+ }
+ }
+ }
+
+ if (vmid > last_vmid_to_scan) {
+ pr_err("amdkfd: didn't found vmid for pasid (%d)\n", p->pasid);
+ return -EFAULT;
+ }
+
+ /* take the VMID for that process the safe way, using the pdd */
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd)
+ return -EFAULT;
+
+ status = dbgdev_wave_control_set_registers(&wac_info, &reg_sq_cmd,
+ &reg_gfx_index);
+ if (status != 0)
+ return -EINVAL;
+
+ /* for non DIQ we need to patch the VMID: */
+ reg_sq_cmd.bits.vm_id = vmid;
+
+ dev->kfd2kgd->wave_control_execute(dev->kgd,
+ reg_gfx_index.u32All,
+ reg_sq_cmd.u32All);
+
+ return 0;
+}
+
+void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
+ enum DBGDEV_TYPE type)
+{
+ BUG_ON(!pdbgdev || !pdev);
+
+ pdbgdev->dev = pdev;
+ pdbgdev->kq = NULL;
+ pdbgdev->type = type;
+ pdbgdev->pqm = NULL;
+
+ switch (type) {
+ case DBGDEV_TYPE_NODIQ:
+ pdbgdev->dbgdev_register = dbgdev_register_nodiq;
+ pdbgdev->dbgdev_unregister = dbgdev_unregister_nodiq;
+ pdbgdev->dbgdev_wave_control = dbgdev_wave_control_nodiq;
+ pdbgdev->dbgdev_address_watch = dbgdev_address_watch_nodiq;
+ break;
+ case DBGDEV_TYPE_DIQ:
+ default:
+ pdbgdev->dbgdev_register = dbgdev_register_diq;
+ pdbgdev->dbgdev_unregister = dbgdev_unregister_diq;
+ pdbgdev->dbgdev_wave_control = dbgdev_wave_control_diq;
+ pdbgdev->dbgdev_address_watch = dbgdev_address_watch_diq;
+ break;
+ }
+
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
new file mode 100644
index 000000000000..03424c20920c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_DBGDEV_H_
+#define KFD_DBGDEV_H_
+
+enum {
+ SQ_CMD_VMID_OFFSET = 28,
+ ADDRESS_WATCH_CNTL_OFFSET = 24
+};
+
+enum {
+ PRIV_QUEUE_SYNC_TIME_MS = 200
+};
+
+/* CONTEXT reg space definition */
+enum {
+ CONTEXT_REG_BASE = 0xA000,
+ CONTEXT_REG_END = 0xA400,
+ CONTEXT_REG_SIZE = CONTEXT_REG_END - CONTEXT_REG_BASE
+};
+
+/* USER CONFIG reg space definition */
+enum {
+ USERCONFIG_REG_BASE = 0xC000,
+ USERCONFIG_REG_END = 0x10000,
+ USERCONFIG_REG_SIZE = USERCONFIG_REG_END - USERCONFIG_REG_BASE
+};
+
+/* CONFIG reg space definition */
+enum {
+ AMD_CONFIG_REG_BASE = 0x2000, /* in dwords */
+ AMD_CONFIG_REG_END = 0x2B00,
+ AMD_CONFIG_REG_SIZE = AMD_CONFIG_REG_END - AMD_CONFIG_REG_BASE
+};
+
+/* SH reg space definition */
+enum {
+ SH_REG_BASE = 0x2C00,
+ SH_REG_END = 0x3000,
+ SH_REG_SIZE = SH_REG_END - SH_REG_BASE
+};
+
+enum SQ_IND_CMD_CMD {
+ SQ_IND_CMD_CMD_NULL = 0x00000000,
+ SQ_IND_CMD_CMD_HALT = 0x00000001,
+ SQ_IND_CMD_CMD_RESUME = 0x00000002,
+ SQ_IND_CMD_CMD_KILL = 0x00000003,
+ SQ_IND_CMD_CMD_DEBUG = 0x00000004,
+ SQ_IND_CMD_CMD_TRAP = 0x00000005,
+};
+
+enum SQ_IND_CMD_MODE {
+ SQ_IND_CMD_MODE_SINGLE = 0x00000000,
+ SQ_IND_CMD_MODE_BROADCAST = 0x00000001,
+ SQ_IND_CMD_MODE_BROADCAST_QUEUE = 0x00000002,
+ SQ_IND_CMD_MODE_BROADCAST_PIPE = 0x00000003,
+ SQ_IND_CMD_MODE_BROADCAST_ME = 0x00000004,
+};
+
+union SQ_IND_INDEX_BITS {
+ struct {
+ uint32_t wave_id:4;
+ uint32_t simd_id:2;
+ uint32_t thread_id:6;
+ uint32_t:1;
+ uint32_t force_read:1;
+ uint32_t read_timeout:1;
+ uint32_t unindexed:1;
+ uint32_t index:16;
+
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+union SQ_IND_CMD_BITS {
+ struct {
+ uint32_t data:32;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+union SQ_CMD_BITS {
+ struct {
+ uint32_t cmd:3;
+ uint32_t:1;
+ uint32_t mode:3;
+ uint32_t check_vmid:1;
+ uint32_t trap_id:3;
+ uint32_t:5;
+ uint32_t wave_id:4;
+ uint32_t simd_id:2;
+ uint32_t:2;
+ uint32_t queue_id:3;
+ uint32_t:1;
+ uint32_t vm_id:4;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+union SQ_IND_DATA_BITS {
+ struct {
+ uint32_t data:32;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+union GRBM_GFX_INDEX_BITS {
+ struct {
+ uint32_t instance_index:8;
+ uint32_t sh_index:8;
+ uint32_t se_index:8;
+ uint32_t:5;
+ uint32_t sh_broadcast_writes:1;
+ uint32_t instance_broadcast_writes:1;
+ uint32_t se_broadcast_writes:1;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+union TCP_WATCH_ADDR_H_BITS {
+ struct {
+ uint32_t addr:16;
+ uint32_t:16;
+
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+union TCP_WATCH_ADDR_L_BITS {
+ struct {
+ uint32_t:6;
+ uint32_t addr:26;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+enum {
+ QUEUESTATE__INVALID = 0, /* so by default we'll get invalid state */
+ QUEUESTATE__ACTIVE_COMPLETION_PENDING,
+ QUEUESTATE__ACTIVE
+};
+
+union ULARGE_INTEGER {
+ struct {
+ uint32_t low_part;
+ uint32_t high_part;
+ } u;
+ unsigned long long quad_part;
+};
+
+
+#define KFD_CIK_VMID_START_OFFSET (8)
+#define KFD_CIK_VMID_END_OFFSET (KFD_CIK_VMID_START_OFFSET + (8))
+
+
+void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
+ enum DBGDEV_TYPE type);
+
+#endif /* KFD_DBGDEV_H_ */
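
As a small usage note, the ULARGE_INTEGER union defined above is how dbgdev_diq_submit_ib() splits a 64-bit GPU VA into the 32-bit low/high fields of the indirect-buffer packet. A minimal sketch (standalone and illustrative only):

	/*
	 * Illustrative only: mirrors how dbgdev_diq_submit_ib() fills in the
	 * pm4__indirect_buffer_pasid ib_base_lo/ib_base_hi fields.
	 */
	static void split_gpu_va(uint64_t vmid0_address,
				 uint32_t *ib_base_lo, uint32_t *ib_base_hi)
	{
		union ULARGE_INTEGER addr;

		addr.quad_part = vmid0_address;		/* 64-bit GPU virtual address */
		*ib_base_lo = addr.u.low_part >> 2;	/* low bits, dword aligned    */
		*ib_base_hi = addr.u.high_part;		/* upper 32 bits              */
	}
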
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
new file mode 100644
index 000000000000..56d676396342
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "kfd_priv.h"
+#include "cik_regs.h"
+#include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_diq.h"
+#include "kfd_dbgmgr.h"
+#include "kfd_dbgdev.h"
+
+static DEFINE_MUTEX(kfd_dbgmgr_mutex);
+
+struct mutex *kfd_get_dbgmgr_mutex(void)
+{
+ return &kfd_dbgmgr_mutex;
+}
+
+
+static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
+{
+ BUG_ON(!pmgr);
+
+ kfree(pmgr->dbgdev);
+
+ pmgr->dbgdev = NULL;
+ pmgr->pasid = 0;
+ pmgr->dev = NULL;
+}
+
+void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
+{
+ if (pmgr != NULL) {
+ kfd_dbgmgr_uninitialize(pmgr);
+ kfree(pmgr);
+ }
+}
+
+bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
+{
+ enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
+ struct kfd_dbgmgr *new_buff;
+
+ BUG_ON(pdev == NULL);
+ BUG_ON(!pdev->init_complete);
+
+ new_buff = kfd_alloc_struct(new_buff);
+ if (!new_buff) {
+ pr_err("amdkfd: Failed to allocate dbgmgr instance\n");
+ return false;
+ }
+
+ new_buff->pasid = 0;
+ new_buff->dev = pdev;
+ new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev);
+ if (!new_buff->dbgdev) {
+ pr_err("amdkfd: Failed to allocate dbgdev instance\n");
+ kfree(new_buff);
+ return false;
+ }
+
+ /* get actual type of DBGDevice: DIQ with HW scheduling (cpsch), non-DIQ otherwise */
+ if (sched_policy == KFD_SCHED_POLICY_NO_HWS)
+ type = DBGDEV_TYPE_NODIQ;
+
+ kfd_dbgdev_init(new_buff->dbgdev, pdev, type);
+ *ppmgr = new_buff;
+
+ return true;
+}
+
+long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
+{
+ BUG_ON(!p || !pmgr || !pmgr->dbgdev);
+
+ if (pmgr->pasid != 0) {
+ pr_debug("H/W debugger is already active using pasid %d\n",
+ pmgr->pasid);
+ return -EBUSY;
+ }
+
+ /* remember pasid */
+ pmgr->pasid = p->pasid;
+
+ /* provide the pqm for diq generation */
+ pmgr->dbgdev->pqm = &p->pqm;
+
+ /* activate the actual registering */
+ pmgr->dbgdev->dbgdev_register(pmgr->dbgdev);
+
+ return 0;
+}
+
+long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
+{
+ BUG_ON(!p || !pmgr || !pmgr->dbgdev);
+
+ /* Is the request coming from the already registered process? */
+ if (pmgr->pasid != p->pasid) {
+ pr_debug("H/W debugger is not registered by calling pasid %d\n",
+ p->pasid);
+ return -EINVAL;
+ }
+
+ pmgr->dbgdev->dbgdev_unregister(pmgr->dbgdev);
+
+ pmgr->pasid = 0;
+
+ return 0;
+}
+
+long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
+ struct dbg_wave_control_info *wac_info)
+{
+ BUG_ON(!pmgr || !pmgr->dbgdev || !wac_info);
+
+ /* Is the request coming from the already registered process? */
+ if (pmgr->pasid != wac_info->process->pasid) {
+ pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ wac_info->process->pasid);
+ return -EINVAL;
+ }
+
+ return (long) pmgr->dbgdev->dbgdev_wave_control(pmgr->dbgdev, wac_info);
+}
+
+long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
+ struct dbg_address_watch_info *adw_info)
+{
+ BUG_ON(!pmgr || !pmgr->dbgdev || !adw_info);
+
+ /* Is the request coming from the already registered process? */
+ if (pmgr->pasid != adw_info->process->pasid) {
+ pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ adw_info->process->pasid);
+ return -EINVAL;
+ }
+
+ return (long) pmgr->dbgdev->dbgdev_address_watch(pmgr->dbgdev,
+ adw_info);
+}
+
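For orientation, below is a minimal, hypothetical caller sketch of the debug manager API added above. The real call sites are presumably the debugger ioctls in kfd_chardev.c (elsewhere in this patch); the helper name and error handling here are illustrative only, and registration is normally a separate step from wave control.

static long example_halt_process_waves(struct kfd_dev *dev,
				       struct kfd_process *p)
{
	struct dbg_wave_control_info wac_info;
	long status;

	memset(&wac_info, 0, sizeof(wac_info));
	wac_info.process = p;
	wac_info.operand = HSA_DBG_WAVEOP_HALT;
	wac_info.mode = HSA_DBG_WAVEMODE_BROADCAST_PROCESS;
	wac_info.trapId = 0;

	mutex_lock(kfd_get_dbgmgr_mutex());

	/* Lazily create the per-device debug manager on first use. */
	if (!dev->dbgmgr && !kfd_dbgmgr_create(&dev->dbgmgr, dev)) {
		mutex_unlock(kfd_get_dbgmgr_mutex());
		return -ENOMEM;
	}

	status = kfd_dbgmgr_register(dev->dbgmgr, p);
	if (!status)
		status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);

	mutex_unlock(kfd_get_dbgmgr_mutex());

	return status;
}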
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
new file mode 100644
index 000000000000..257a745ad0b5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_DBGMGR_H_
+#define KFD_DBGMGR_H_
+
+#include "kfd_priv.h"
+
+/* must align with hsakmttypes definition */
+#pragma pack(push, 4)
+
+enum HSA_DBG_WAVEOP {
+ HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */
+ HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
+ HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */
+ HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter
+ debug mode */
+ HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take
+ a trap */
+ HSA_DBG_NUM_WAVEOP = 5,
+ HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF
+};
+
+enum HSA_DBG_WAVEMODE {
+ /* send command to a single wave */
+ HSA_DBG_WAVEMODE_SINGLE = 0,
+ /*
+ * Broadcast to all wavefronts of all processes is not
+ * supported for HSA user mode
+ */
+
+ /* send to waves within current process */
+ HSA_DBG_WAVEMODE_BROADCAST_PROCESS = 2,
+ /* send to waves within current process on CU */
+ HSA_DBG_WAVEMODE_BROADCAST_PROCESS_CU = 3,
+ HSA_DBG_NUM_WAVEMODE = 3,
+ HSA_DBG_MAX_WAVEMODE = 0xFFFFFFFF
+};
+
+enum HSA_DBG_WAVEMSG_TYPE {
+ HSA_DBG_WAVEMSG_AUTO = 0,
+ HSA_DBG_WAVEMSG_USER = 1,
+ HSA_DBG_WAVEMSG_ERROR = 2,
+ HSA_DBG_NUM_WAVEMSG,
+ HSA_DBG_MAX_WAVEMSG = 0xFFFFFFFF
+};
+
+enum HSA_DBG_WATCH_MODE {
+ HSA_DBG_WATCH_READ = 0, /* Read operations only */
+ HSA_DBG_WATCH_NONREAD = 1, /* Write or Atomic operations only */
+ HSA_DBG_WATCH_ATOMIC = 2, /* Atomic Operations only */
+ HSA_DBG_WATCH_ALL = 3, /* Read, Write or Atomic operations */
+ HSA_DBG_WATCH_NUM,
+ HSA_DBG_WATCH_SIZE = 0xFFFFFFFF
+};
+
+/* This structure is hardware specific and may change in the future */
+struct HsaDbgWaveMsgAMDGen2 {
+ union {
+ struct ui32 {
+ uint32_t UserData:8; /* user data */
+ uint32_t ShaderArray:1; /* Shader array */
+ uint32_t Priv:1; /* Privileged */
+ uint32_t Reserved0:4; /* This field is reserved,
+ should be 0 */
+ uint32_t WaveId:4; /* wave id */
+ uint32_t SIMD:2; /* SIMD id */
+ uint32_t HSACU:4; /* Compute unit */
+ uint32_t ShaderEngine:2;/* Shader engine */
+ uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */
+ uint32_t Reserved1:4; /* This field is reserved,
+ should be 0 */
+ } ui32;
+ uint32_t Value;
+ };
+ uint32_t Reserved2;
+};
+
+union HsaDbgWaveMessageAMD {
+ struct HsaDbgWaveMsgAMDGen2 WaveMsgInfoGen2;
+ /* for future HsaDbgWaveMsgAMDGen3; */
+};
+
+struct HsaDbgWaveMessage {
+ void *MemoryVA; /* ptr to associated host-accessible data */
+ union HsaDbgWaveMessageAMD DbgWaveMsg;
+};
+
+/*
+ * TODO: These definitions should be moved to kfd_event once it is implemented.
+ *
+ * HSA sync primitive, Event and HW Exception notification API definitions.
+ * The API functions allow the runtime to define a so-called sync-primitive,
+ * a SW object combining a user-mode provided "syncvar" and a scheduler event
+ * that can be signaled through a defined GPU interrupt. A syncvar is
+ * a process virtual memory location of a certain size that can be accessed
+ * by CPU and GPU shader code within the process to set and query the content
+ * within that memory. The definition of the content is determined by the HSA
+ * runtime and potentially GPU shader code interfacing with the HSA runtime.
+ * The syncvar values are commonly written through a PM4 WRITE_DATA packet
+ * in the user mode instruction stream. The OS scheduler event is typically
+ * associated and signaled by an interrupt issued by the GPU, but other HSA
+ * system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced
+ * by the KFD through this mechanism, too. */
+
+/* these are the new definitions for events */
+enum HSA_EVENTTYPE {
+ HSA_EVENTTYPE_SIGNAL = 0, /* user-mode generated GPU signal */
+ HSA_EVENTTYPE_NODECHANGE = 1, /* HSA node change (attach/detach) */
+ HSA_EVENTTYPE_DEVICESTATECHANGE = 2, /* HSA device state change
+ (start/stop) */
+ HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */
+ HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */
+ HSA_EVENTTYPE_DEBUG_EVENT = 5, /* GPU signal for debugging */
+ HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */
+ HSA_EVENTTYPE_QUEUE_EVENT = 7, /* GPU signal queue idle state
+ (EOP pm4) */
+ /* ... */
+ HSA_EVENTTYPE_MAXID,
+ HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
+};
+
+/* Sub-definitions for various event types: Syncvar */
+struct HsaSyncVar {
+ union SyncVar {
+ void *UserData; /* pointer to user mode data */
+ uint64_t UserDataPtrValue; /* 64bit compatibility of value */
+ } SyncVar;
+ uint64_t SyncVarSize;
+};
+
+/* Sub-definitions for various event types: NodeChange */
+
+enum HSA_EVENTTYPE_NODECHANGE_FLAGS {
+ HSA_EVENTTYPE_NODECHANGE_ADD = 0,
+ HSA_EVENTTYPE_NODECHANGE_REMOVE = 1,
+ HSA_EVENTTYPE_NODECHANGE_SIZE = 0xFFFFFFFF
+};
+
+struct HsaNodeChange {
+ /* HSA node added/removed on the platform */
+ enum HSA_EVENTTYPE_NODECHANGE_FLAGS Flags;
+};
+
+/* Sub-definitions for various event types: DeviceStateChange */
+enum HSA_EVENTTYPE_DEVICESTATECHANGE_FLAGS {
+ /* device started (and available) */
+ HSA_EVENTTYPE_DEVICESTATUSCHANGE_START = 0,
+ /* device stopped (i.e. unavailable) */
+ HSA_EVENTTYPE_DEVICESTATUSCHANGE_STOP = 1,
+ HSA_EVENTTYPE_DEVICESTATUSCHANGE_SIZE = 0xFFFFFFFF
+};
+
+enum HSA_DEVICE {
+ HSA_DEVICE_CPU = 0,
+ HSA_DEVICE_GPU = 1,
+ MAX_HSA_DEVICE = 2
+};
+
+struct HsaDeviceStateChange {
+ uint32_t NodeId; /* F-NUMA node that contains the device */
+ enum HSA_DEVICE Device; /* device type: GPU or CPU */
+ enum HSA_EVENTTYPE_DEVICESTATECHANGE_FLAGS Flags; /* event flags */
+};
+
+struct HsaEventData {
+ enum HSA_EVENTTYPE EventType; /* event type */
+ union EventData {
+ /*
+ * return data associated with HSA_EVENTTYPE_SIGNAL
+ * and other events
+ */
+ struct HsaSyncVar SyncVar;
+
+ /* data associated with HSA_EVENTTYPE_NODE_CHANGE */
+ struct HsaNodeChange NodeChangeState;
+
+ /* data associated with HSA_EVENTTYPE_DEVICE_STATE_CHANGE */
+ struct HsaDeviceStateChange DeviceState;
+ } EventData;
+
+ /* the following data entries are internal to the KFD & thunk itself */
+
+ /* internal thunk store for Event data (OsEventHandle) */
+ uint64_t HWData1;
+ /* internal thunk store for Event data (HWAddress) */
+ uint64_t HWData2;
+ /* internal thunk store for Event data (HWData) */
+ uint32_t HWData3;
+};
+
+struct HsaEventDescriptor {
+ /* event type to allocate */
+ enum HSA_EVENTTYPE EventType;
+ /* H-NUMA node containing GPU device that is event source */
+ uint32_t NodeId;
+ /* pointer to user mode syncvar data, syncvar->UserDataPtrValue
+ * may be NULL
+ */
+ struct HsaSyncVar SyncVar;
+};
+
+struct HsaEvent {
+ uint32_t EventId;
+ struct HsaEventData EventData;
+};
+
+#pragma pack(pop)
+
+enum DBGDEV_TYPE {
+ DBGDEV_TYPE_ILLEGAL = 0,
+ DBGDEV_TYPE_NODIQ = 1,
+ DBGDEV_TYPE_DIQ = 2,
+ DBGDEV_TYPE_TEST = 3
+};
+
+struct dbg_address_watch_info {
+ struct kfd_process *process;
+ enum HSA_DBG_WATCH_MODE *watch_mode;
+ uint64_t *watch_address;
+ uint64_t *watch_mask;
+ struct HsaEvent *watch_event;
+ uint32_t num_watch_points;
+};
+
+struct dbg_wave_control_info {
+ struct kfd_process *process;
+ uint32_t trapId;
+ enum HSA_DBG_WAVEOP operand;
+ enum HSA_DBG_WAVEMODE mode;
+ struct HsaDbgWaveMessage dbgWave_msg;
+};
+
+struct kfd_dbgdev {
+
+ /* The device that owns this data. */
+ struct kfd_dev *dev;
+
+ /* kernel queue for DIQ */
+ struct kernel_queue *kq;
+
+ /* a pointer to the pqm of the calling process */
+ struct process_queue_manager *pqm;
+
+ /* type of debug device ( DIQ, non DIQ, etc. ) */
+ enum DBGDEV_TYPE type;
+
+ /* virtualized function pointers to device dbg */
+ int (*dbgdev_register)(struct kfd_dbgdev *dbgdev);
+ int (*dbgdev_unregister)(struct kfd_dbgdev *dbgdev);
+ int (*dbgdev_address_watch)(struct kfd_dbgdev *dbgdev,
+ struct dbg_address_watch_info *adw_info);
+ int (*dbgdev_wave_control)(struct kfd_dbgdev *dbgdev,
+ struct dbg_wave_control_info *wac_info);
+
+};
+
+struct kfd_dbgmgr {
+ unsigned int pasid;
+ struct kfd_dev *dev;
+ struct kfd_dbgdev *dbgdev;
+};
+
+/* prototypes for debug manager functions */
+struct mutex *kfd_get_dbgmgr_mutex(void);
+void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr);
+bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev);
+long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
+long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
+long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
+ struct dbg_wave_control_info *wac_info);
+long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
+ struct dbg_address_watch_info *adw_info);
+#endif /* KFD_DBGMGR_H_ */
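As a usage illustration for the structures declared above, here is a hypothetical helper showing how the parallel arrays in struct dbg_address_watch_info are meant to be filled before calling kfd_dbgmgr_address_watch(); the single watch point and the mask value are purely illustrative.

static long example_set_watch_point(struct kfd_dbgmgr *pmgr,
				    struct kfd_process *p, uint64_t addr)
{
	enum HSA_DBG_WATCH_MODE mode = HSA_DBG_WATCH_ALL;
	uint64_t mask = ~0ULL;			/* illustrative mask value */
	struct dbg_address_watch_info adw_info = {
		.process		= p,
		.num_watch_points	= 1,
		.watch_mode		= &mode,
		.watch_address		= &addr,
		.watch_mask		= &mask,
		.watch_event		= NULL,	/* no event association */
	};

	/* Caller is expected to hold kfd_get_dbgmgr_mutex(). */
	return kfd_dbgmgr_address_watch(pmgr, &adw_info);
}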
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index ca7f2d3af2ff..75312c82969f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -33,14 +33,21 @@
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
.max_pasid_bits = 16,
+ /* max num of queues for KV. TODO: should be a dynamic value */
+ .max_no_of_hqd = 24,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED
};
static const struct kfd_device_info carrizo_device_info = {
.asic_family = CHIP_CARRIZO,
.max_pasid_bits = 16,
+ /* max num of queues for CZ. TODO: should be a dynamic value */
+ .max_no_of_hqd = 24,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED
};
@@ -181,6 +188,32 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
kfd_unbind_process_from_device(dev, pasid);
}
+/*
+ * This function is called by the IOMMU driver on PPR failure
+ */
+static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
+ unsigned long address, u16 flags)
+{
+ struct kfd_dev *dev;
+
+ dev_warn(kfd_device,
+ "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
+ PCI_BUS_NUM(pdev->devfn),
+ PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn),
+ pasid,
+ address,
+ flags);
+
+ dev = kfd_device_by_pci_dev(pdev);
+ BUG_ON(dev == NULL);
+
+ kfd_signal_iommu_event(dev, pasid, address,
+ flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
+
+ return AMD_IOMMU_INV_PRI_RSP_INVALID;
+}
+
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources)
{
@@ -235,6 +268,13 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_topology_add_device_error;
}
+ if (kfd_interrupt_init(kfd)) {
+ dev_err(kfd_device,
+ "Error initializing interrupts for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto kfd_interrupt_error;
+ }
+
if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
"Error initializing iommuv2 for device (%x:%x)\n",
@@ -243,6 +283,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
}
amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
iommu_pasid_shutdown_callback);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
kfd->dqm = device_queue_manager_init(kfd);
if (!kfd->dqm) {
@@ -259,6 +300,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto dqm_start_error;
}
+ kfd->dbgmgr = NULL;
+
kfd->init_complete = true;
dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
kfd->pdev->device);
@@ -273,6 +316,8 @@ dqm_start_error:
device_queue_manager_error:
amd_iommu_free_device(kfd->pdev);
device_iommu_pasid_error:
+ kfd_interrupt_exit(kfd);
+kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
kfd_gtt_sa_fini(kfd);
@@ -290,6 +335,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
if (kfd->init_complete) {
device_queue_manager_uninit(kfd->dqm);
amd_iommu_free_device(kfd->pdev);
+ kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
kfd_gtt_sa_fini(kfd);
kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
@@ -305,6 +351,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
if (kfd->init_complete) {
kfd->dqm->ops.stop(kfd->dqm);
amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
amd_iommu_free_device(kfd->pdev);
}
}
@@ -324,6 +371,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
return -ENXIO;
amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
iommu_pasid_shutdown_callback);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
kfd->dqm->ops.start(kfd->dqm);
}
@@ -333,7 +381,17 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
- /* Process interrupts / schedule work as necessary */
+ if (!kfd->init_complete)
+ return;
+
+ spin_lock(&kfd->interrupt_lock);
+
+ if (kfd->interrupts_active
+ && interrupt_is_wanted(kfd, ih_ring_entry)
+ && enqueue_ih_ring_entry(kfd, ih_ring_entry))
+ schedule_work(&kfd->interrupt_work);
+
+ spin_unlock(&kfd->interrupt_lock);
}
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
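The ISR path above only filters the IH ring entry and copies it into a software ring under the interrupt spinlock; everything else is deferred to kfd->interrupt_work. A conceptual sketch of that deferred side follows (the real worker lives in kfd_interrupt.c, which is part of this change but not shown here; dequeue_ih_ring_entry() and process_ih_entry() are placeholder names):

static void example_interrupt_wq(struct work_struct *work)
{
	struct kfd_dev *dev = container_of(work, struct kfd_dev,
					   interrupt_work);
	uint32_t ih_ring_entry[4];	/* ih_ring_entry_size is 4 dwords on CIK */

	/* Drain what the top half queued, now outside of IRQ context. */
	while (dequeue_ih_ring_entry(dev, ih_ring_entry))
		process_ih_entry(dev, ih_ring_entry);
}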
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 596ee5cd3b84..4bb7f4223762 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -45,7 +45,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
-static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+static int destroy_queues_cpsch(struct device_queue_manager *dqm,
+ bool preempt_static_queues, bool lock);
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
@@ -523,6 +524,17 @@ int init_pipelines(struct device_queue_manager *dqm,
return 0;
}
+static void init_interrupts(struct device_queue_manager *dqm)
+{
+ unsigned int i;
+
+ BUG_ON(dqm == NULL);
+
+ for (i = 0 ; i < get_pipes_num(dqm) ; i++)
+ dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd,
+ i + get_first_pipe(dqm));
+}
+
static int init_scheduler(struct device_queue_manager *dqm)
{
int retval;
@@ -582,6 +594,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
static int start_nocpsch(struct device_queue_manager *dqm)
{
+ init_interrupts(dqm);
return 0;
}
@@ -615,19 +628,6 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}
-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
- struct qcm_process_device *qpd)
-{
- uint32_t value = SDMA_ATC;
-
- if (q->process->is_32bit_user_mode)
- value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
- else
- value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
- qpd_to_pdd(qpd)));
- q->properties.sdma_vm_addr = value;
-}
-
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
@@ -650,7 +650,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
- init_sdma_vm(dqm, q, qpd);
+ dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval != 0) {
@@ -751,6 +751,9 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm->fence_addr = dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
+
+ init_interrupts(dqm);
+
list_for_each_entry(node, &dqm->queues, list)
if (node->qpd->pqm->process && dqm->dev)
kfd_bind_process_to_device(dqm->dev,
@@ -773,7 +776,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
BUG_ON(!dqm);
- destroy_queues_cpsch(dqm, true);
+ destroy_queues_cpsch(dqm, true, true);
list_for_each_entry(node, &dqm->queues, list) {
pdd = qpd_to_pdd(node->qpd);
@@ -827,7 +830,8 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In %s\n", __func__);
mutex_lock(&dqm->lock);
- destroy_queues_cpsch(dqm, false);
+ /* here we actually preempt the DIQ */
+ destroy_queues_cpsch(dqm, true, false);
list_del(&kq->list);
dqm->queue_count--;
qpd->is_debug = false;
@@ -883,8 +887,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
return -ENOMEM;
}
- init_sdma_vm(dqm, q, qpd);
-
+ dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval != 0)
@@ -912,7 +915,7 @@ out:
return retval;
}
-static int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
unsigned long timeout)
{
@@ -934,13 +937,16 @@ static int destroy_sdma_queues(struct device_queue_manager *dqm,
unsigned int sdma_engine)
{
return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false,
+ KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
sdma_engine);
}
-static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+static int destroy_queues_cpsch(struct device_queue_manager *dqm,
+ bool preempt_static_queues, bool lock)
{
int retval;
+ enum kfd_preempt_type_filter preempt_type;
+ struct kfd_process_device *pdd;
BUG_ON(!dqm);
@@ -959,8 +965,12 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
destroy_sdma_queues(dqm, 1);
}
+ preempt_type = preempt_static_queues ?
+ KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
+ KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;
+
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
- KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
+ preempt_type, 0, false, 0);
if (retval != 0)
goto out;
@@ -968,8 +978,14 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* should be timed out */
- amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
+ retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
+ if (retval != 0) {
+ pdd = kfd_get_process_device_data(dqm->dev,
+ kfd_get_process(current));
+ pdd->reset_wavefronts = true;
+ goto out;
+ }
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
@@ -988,7 +1004,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
if (lock)
mutex_lock(&dqm->lock);
- retval = destroy_queues_cpsch(dqm, false);
+ retval = destroy_queues_cpsch(dqm, false, false);
if (retval != 0) {
pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
goto out;
@@ -1023,13 +1039,27 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
{
int retval;
struct mqd_manager *mqd;
+ bool preempt_all_queues;
BUG_ON(!dqm || !qpd || !q);
+ preempt_all_queues = false;
+
retval = 0;
/* remove queue from list to prevent rescheduling after preemption */
mutex_lock(&dqm->lock);
+
+ if (qpd->is_debug) {
+ /*
+ * error, currently we do not allow destroying a queue
+ * of a process that is being debugged
+ */
+ retval = -EBUSY;
+ goto failed_try_destroy_debugged_queue;
+
+ }
+
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
if (!mqd) {
@@ -1061,6 +1091,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
return 0;
failed:
+failed_try_destroy_debugged_queue:
+
mutex_unlock(&dqm->lock);
return retval;
}
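Since amdkfd_fence_wait_timeout() loses its static qualifier above, it can now be reused outside this file (for example by the debug/DIQ paths). A hypothetical caller might look like this; the helper name is illustrative:

static int example_wait_for_fence(unsigned int *fence_cpu_addr)
{
	int err;

	err = amdkfd_fence_wait_timeout(fence_cpu_addr, KFD_FENCE_COMPLETED,
					QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (err)
		pr_err("kfd: fence was not signaled within the preemption timeout\n");

	return err;
}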
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 488f51d19427..ec4036a09f3e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -88,9 +88,11 @@ struct device_queue_manager_ops {
struct queue *q,
struct qcm_process_device *qpd,
int *allocate_vmid);
+
int (*destroy_queue)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q);
+
int (*update_queue)(struct device_queue_manager *dqm,
struct queue *q);
@@ -100,8 +102,10 @@ struct device_queue_manager_ops {
int (*register_process)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
+
int (*unregister_process)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
+
int (*initialize)(struct device_queue_manager *dqm);
int (*start)(struct device_queue_manager *dqm);
int (*stop)(struct device_queue_manager *dqm);
@@ -109,15 +113,32 @@ struct device_queue_manager_ops {
int (*create_kernel_queue)(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd);
+
void (*destroy_kernel_queue)(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd);
+
+ bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
+};
+
+struct device_queue_manager_asic_ops {
+ int (*register_process)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+ int (*initialize)(struct device_queue_manager *dqm);
bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
+ void (*init_sdma_vm)(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
};
/**
@@ -134,7 +155,7 @@ struct device_queue_manager_ops {
struct device_queue_manager {
struct device_queue_manager_ops ops;
- struct device_queue_manager_ops ops_asic_specific;
+ struct device_queue_manager_asic_ops ops_asic_specific;
struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
@@ -157,8 +178,8 @@ struct device_queue_manager {
bool active_runlist;
};
-void device_queue_manager_init_cik(struct device_queue_manager_ops *ops);
-void device_queue_manager_init_vi(struct device_queue_manager_ops *ops);
+void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
+void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
int init_pipelines(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 5469efe0523e..9ce8a20a7aff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -33,12 +33,15 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
static int register_process_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static int initialize_cpsch_cik(struct device_queue_manager *dqm);
+static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd);
-void device_queue_manager_init_cik(struct device_queue_manager_ops *ops)
+void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops)
{
ops->set_cache_memory_policy = set_cache_memory_policy_cik;
ops->register_process = register_process_cik;
ops->initialize = initialize_cpsch_cik;
+ ops->init_sdma_vm = init_sdma_vm;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
@@ -129,6 +132,19 @@ static int register_process_cik(struct device_queue_manager *dqm,
return 0;
}
+static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ uint32_t value = SDMA_ATC;
+
+ if (q->process->is_32bit_user_mode)
+ value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
+ else
+ value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
+ qpd_to_pdd(qpd)));
+ q->properties.sdma_vm_addr = value;
+}
+
static int initialize_cpsch_cik(struct device_queue_manager *dqm)
{
return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 20553dcd257d..4c15212a3899 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -32,14 +32,17 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
static int register_process_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static int initialize_cpsch_vi(struct device_queue_manager *dqm);
+static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd);
-void device_queue_manager_init_vi(struct device_queue_manager_ops *ops)
+void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
{
pr_warn("amdkfd: VI DQM is not currently supported\n");
ops->set_cache_memory_policy = set_cache_memory_policy_vi;
ops->register_process = register_process_vi;
ops->initialize = initialize_cpsch_vi;
+ ops->init_sdma_vm = init_sdma_vm;
}
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
@@ -58,6 +61,11 @@ static int register_process_vi(struct device_queue_manager *dqm,
return -1;
}
+static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd)
+{
+}
+
static int initialize_cpsch_vi(struct device_queue_manager *dqm)
{
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 17e56dcc8540..e621eba63126 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -142,14 +142,13 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("kfd: mapping doorbell page in kfd_doorbell_mmap\n"
- " target user address == 0x%08llX\n"
- " physical address == 0x%08llX\n"
- " vm_flags == 0x%04lX\n"
- " size == 0x%04lX\n",
- (unsigned long long) vma->vm_start, address, vma->vm_flags,
- doorbell_process_allocation());
-
+ pr_debug("mapping doorbell page:\n");
+ pr_debug(" target user address == 0x%08llX\n",
+ (unsigned long long) vma->vm_start);
+ pr_debug(" physical address == 0x%08llX\n", address);
+ pr_debug(" vm_flags == 0x%04lX\n", vma->vm_flags);
+ pr_debug(" size == 0x%04lX\n",
+ doorbell_process_allocation());
return io_remap_pfn_range(vma,
vma->vm_start,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
new file mode 100644
index 000000000000..b6e28dcaea1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -0,0 +1,969 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/mm_types.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/memory.h>
+#include "kfd_priv.h"
+#include "kfd_events.h"
+#include <linux/device.h>
+
+/*
+ * A task can only be on a single wait_queue at a time, but we need to support
+ * waiting on multiple events (any/all).
+ * Instead of each event simply having a wait_queue with sleeping tasks, it
+ * has a singly-linked list of tasks.
+ * A thread that wants to sleep creates an array of these, one for each event
+ * and adds one to each event's waiter chain.
+ */
+struct kfd_event_waiter {
+ struct list_head waiters;
+ struct task_struct *sleeping_task;
+
+ /* Transitions to true when the event this belongs to is signaled. */
+ bool activated;
+
+ /* Event */
+ struct kfd_event *event;
+ uint32_t input_index;
+};
+
+/*
+ * Over-complicated pooled allocator for event notification slots.
+ *
+ * Each signal event needs a 64-bit signal slot where the signaler will write
+ * a 1 before sending an interrupt. (This is needed because some interrupts
+ * do not contain enough spare data bits to identify an event.)
+ * We get whole pages from the page allocator and map them to the process VA.
+ * Individual signal events are then allocated a slot in a page.
+ */
+
+struct signal_page {
+ struct list_head event_pages; /* kfd_process.signal_event_pages */
+ uint64_t *kernel_address;
+ uint64_t __user *user_address;
+ uint32_t page_index; /* Index into the mmap aperture. */
+ unsigned int free_slots;
+ unsigned long used_slot_bitmap[0];
+};
+
+#define SLOTS_PER_PAGE KFD_SIGNAL_EVENT_LIMIT
+#define SLOT_BITMAP_SIZE BITS_TO_LONGS(SLOTS_PER_PAGE)
+#define BITS_PER_PAGE (ilog2(SLOTS_PER_PAGE)+1)
+#define SIGNAL_PAGE_SIZE (sizeof(struct signal_page) + \
+ SLOT_BITMAP_SIZE * sizeof(long))
+
+/*
+ * For signal events, the event ID is used as the interrupt user data.
+ * For SQ s_sendmsg interrupts, this is limited to 8 bits.
+ */
+
+#define INTERRUPT_DATA_BITS 8
+#define SIGNAL_EVENT_ID_SLOT_SHIFT 0
+
+static uint64_t *page_slots(struct signal_page *page)
+{
+ return page->kernel_address;
+}
+
+static bool allocate_free_slot(struct kfd_process *process,
+ struct signal_page **out_page,
+ unsigned int *out_slot_index)
+{
+ struct signal_page *page;
+
+ list_for_each_entry(page, &process->signal_event_pages, event_pages) {
+ if (page->free_slots > 0) {
+ unsigned int slot =
+ find_first_zero_bit(page->used_slot_bitmap,
+ SLOTS_PER_PAGE);
+
+ __set_bit(slot, page->used_slot_bitmap);
+ page->free_slots--;
+
+ page_slots(page)[slot] = UNSIGNALED_EVENT_SLOT;
+
+ *out_page = page;
+ *out_slot_index = slot;
+
+ pr_debug("allocated event signal slot in page %p, slot %d\n",
+ page, slot);
+
+ return true;
+ }
+ }
+
+ pr_debug("No free event signal slots were found for process %p\n",
+ process);
+
+ return false;
+}
+
+#define list_tail_entry(head, type, member) \
+ list_entry((head)->prev, type, member)
+
+static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
+{
+ void *backing_store;
+ struct signal_page *page;
+
+ page = kzalloc(SIGNAL_PAGE_SIZE, GFP_KERNEL);
+ if (!page)
+ goto fail_alloc_signal_page;
+
+ page->free_slots = SLOTS_PER_PAGE;
+
+ backing_store = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
+ if (!backing_store)
+ goto fail_alloc_signal_store;
+
+ /* prevent user-mode info leaks */
+ memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
+ KFD_SIGNAL_EVENT_LIMIT * 8);
+
+ page->kernel_address = backing_store;
+
+ if (list_empty(&p->signal_event_pages))
+ page->page_index = 0;
+ else
+ page->page_index = list_tail_entry(&p->signal_event_pages,
+ struct signal_page,
+ event_pages)->page_index + 1;
+
+ pr_debug("allocated new event signal page at %p, for process %p\n",
+ page, p);
+ pr_debug("page index is %d\n", page->page_index);
+
+ list_add(&page->event_pages, &p->signal_event_pages);
+
+ return true;
+
+fail_alloc_signal_store:
+ kfree(page);
+fail_alloc_signal_page:
+ return false;
+}
+
+static bool allocate_event_notification_slot(struct file *devkfd,
+ struct kfd_process *p,
+ struct signal_page **page,
+ unsigned int *signal_slot_index)
+{
+ bool ret;
+
+ ret = allocate_free_slot(p, page, signal_slot_index);
+ if (ret == false) {
+ ret = allocate_signal_page(devkfd, p);
+ if (ret == true)
+ ret = allocate_free_slot(p, page, signal_slot_index);
+ }
+
+ return ret;
+}
+
+/* Assumes that the process's event_mutex is locked. */
+static void release_event_notification_slot(struct signal_page *page,
+ size_t slot_index)
+{
+ __clear_bit(slot_index, page->used_slot_bitmap);
+ page->free_slots++;
+
+ /* We don't free signal pages, they are retained by the process
+ * and reused until it exits. */
+}
+
+static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
+ unsigned int page_index)
+{
+ struct signal_page *page;
+
+ /*
+ * This is safe because we don't delete signal pages until the
+ * process exits.
+ */
+ list_for_each_entry(page, &p->signal_event_pages, event_pages)
+ if (page->page_index == page_index)
+ return page;
+
+ return NULL;
+}
+
+/*
+ * Assumes that p->event_mutex is held and of course that p is not going
+ * away (current or locked).
+ */
+static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
+{
+ struct kfd_event *ev;
+
+ hash_for_each_possible(p->events, ev, events, id)
+ if (ev->event_id == id)
+ return ev;
+
+ return NULL;
+}
+
+static u32 make_signal_event_id(struct signal_page *page,
+ unsigned int signal_slot_index)
+{
+ return page->page_index |
+ (signal_slot_index << SIGNAL_EVENT_ID_SLOT_SHIFT);
+}
+
+/*
+ * Produce a kfd event id for a nonsignal event.
+ * These are arbitrary numbers, so we do a sequential search through
+ * the hash table for an unused number.
+ */
+static u32 make_nonsignal_event_id(struct kfd_process *p)
+{
+ u32 id;
+
+ for (id = p->next_nonsignal_event_id;
+ id < KFD_LAST_NONSIGNAL_EVENT_ID &&
+ lookup_event_by_id(p, id) != NULL;
+ id++)
+ ;
+
+ if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
+
+ /*
+ * What if id == LAST_NONSIGNAL_EVENT_ID - 1?
+ * Then next_nonsignal_event_id = LAST_NONSIGNAL_EVENT_ID so
+ * the first loop fails immediately and we proceed with the
+ * wraparound loop below.
+ */
+ p->next_nonsignal_event_id = id + 1;
+
+ return id;
+ }
+
+ for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ id < KFD_LAST_NONSIGNAL_EVENT_ID &&
+ lookup_event_by_id(p, id) != NULL;
+ id++)
+ ;
+
+
+ if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
+ p->next_nonsignal_event_id = id + 1;
+ return id;
+ }
+
+ p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ return 0;
+}
+
+static struct kfd_event *lookup_event_by_page_slot(struct kfd_process *p,
+ struct signal_page *page,
+ unsigned int signal_slot)
+{
+ return lookup_event_by_id(p, make_signal_event_id(page, signal_slot));
+}
+
+static int create_signal_event(struct file *devkfd,
+ struct kfd_process *p,
+ struct kfd_event *ev)
+{
+ if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
+ pr_warn("amdkfd: Signal event wasn't created because limit was reached\n");
+ return -ENOMEM;
+ }
+
+ if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
+ &ev->signal_slot_index)) {
+ pr_warn("amdkfd: Signal event wasn't created because out of kernel memory\n");
+ return -ENOMEM;
+ }
+
+ p->signal_event_count++;
+
+ ev->user_signal_address =
+ &ev->signal_page->user_address[ev->signal_slot_index];
+
+ ev->event_id = make_signal_event_id(ev->signal_page,
+ ev->signal_slot_index);
+
+ pr_debug("signal event number %zu created with id %d, address %p\n",
+ p->signal_event_count, ev->event_id,
+ ev->user_signal_address);
+
+ return 0;
+}
+
+/*
+ * No non-signal events are supported yet.
+ * We create them as events that never signal.
+ * Set-event calls from user mode fail for these events.
+ */
+static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
+{
+ ev->event_id = make_nonsignal_event_id(p);
+ if (ev->event_id == 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void kfd_event_init_process(struct kfd_process *p)
+{
+ mutex_init(&p->event_mutex);
+ hash_init(p->events);
+ INIT_LIST_HEAD(&p->signal_event_pages);
+ p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ p->signal_event_count = 0;
+}
+
+static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
+{
+ if (ev->signal_page != NULL) {
+ release_event_notification_slot(ev->signal_page,
+ ev->signal_slot_index);
+ p->signal_event_count--;
+ }
+
+ /*
+ * Abandon the list of waiters. Individual waiting threads will
+ * clean up their own data.
+ */
+ list_del(&ev->waiters);
+
+ hash_del(&ev->events);
+ kfree(ev);
+}
+
+static void destroy_events(struct kfd_process *p)
+{
+ struct kfd_event *ev;
+ struct hlist_node *tmp;
+ unsigned int hash_bkt;
+
+ hash_for_each_safe(p->events, hash_bkt, tmp, ev, events)
+ destroy_event(p, ev);
+}
+
+/*
+ * We assume that the process is being destroyed and there is no need to
+ * unmap the pages or keep bookkeeping data in order.
+ */
+static void shutdown_signal_pages(struct kfd_process *p)
+{
+ struct signal_page *page, *tmp;
+
+ list_for_each_entry_safe(page, tmp, &p->signal_event_pages,
+ event_pages) {
+ free_pages((unsigned long)page->kernel_address,
+ get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
+ kfree(page);
+ }
+}
+
+void kfd_event_free_process(struct kfd_process *p)
+{
+ destroy_events(p);
+ shutdown_signal_pages(p);
+}
+
+static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
+{
+ return ev->type == KFD_EVENT_TYPE_SIGNAL ||
+ ev->type == KFD_EVENT_TYPE_DEBUG;
+}
+
+static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
+{
+ return ev->type == KFD_EVENT_TYPE_SIGNAL;
+}
+
+int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+ uint64_t *event_page_offset, uint32_t *event_slot_index)
+{
+ int ret = 0;
+ struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = event_type;
+ ev->auto_reset = auto_reset;
+ ev->signaled = false;
+
+ INIT_LIST_HEAD(&ev->waiters);
+
+ *event_page_offset = 0;
+
+ mutex_lock(&p->event_mutex);
+
+ switch (event_type) {
+ case KFD_EVENT_TYPE_SIGNAL:
+ case KFD_EVENT_TYPE_DEBUG:
+ ret = create_signal_event(devkfd, p, ev);
+ if (!ret) {
+ *event_page_offset = (ev->signal_page->page_index |
+ KFD_MMAP_EVENTS_MASK);
+ *event_page_offset <<= PAGE_SHIFT;
+ *event_slot_index = ev->signal_slot_index;
+ }
+ break;
+ default:
+ ret = create_other_event(p, ev);
+ break;
+ }
+
+ if (!ret) {
+ hash_add(p->events, &ev->events, ev->event_id);
+
+ *event_id = ev->event_id;
+ *event_trigger_data = ev->event_id;
+ } else {
+ kfree(ev);
+ }
+
+ mutex_unlock(&p->event_mutex);
+
+ return ret;
+}
+
+/* Assumes that p is current. */
+int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
+{
+ struct kfd_event *ev;
+ int ret = 0;
+
+ mutex_lock(&p->event_mutex);
+
+ ev = lookup_event_by_id(p, event_id);
+
+ if (ev)
+ destroy_event(p, ev);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&p->event_mutex);
+ return ret;
+}
+
+static void set_event(struct kfd_event *ev)
+{
+ struct kfd_event_waiter *waiter;
+ struct kfd_event_waiter *next;
+
+ /* Auto reset if the list is non-empty and we're waking someone. */
+ ev->signaled = !ev->auto_reset || list_empty(&ev->waiters);
+
+ list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) {
+ waiter->activated = true;
+
+ /* _init because free_waiters will call list_del */
+ list_del_init(&waiter->waiters);
+
+ wake_up_process(waiter->sleeping_task);
+ }
+}
+
+/* Assumes that p is current. */
+int kfd_set_event(struct kfd_process *p, uint32_t event_id)
+{
+ int ret = 0;
+ struct kfd_event *ev;
+
+ mutex_lock(&p->event_mutex);
+
+ ev = lookup_event_by_id(p, event_id);
+
+ if (ev && event_can_be_cpu_signaled(ev))
+ set_event(ev);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&p->event_mutex);
+ return ret;
+}
+
+static void reset_event(struct kfd_event *ev)
+{
+ ev->signaled = false;
+}
+
+/* Assumes that p is current. */
+int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
+{
+ int ret = 0;
+ struct kfd_event *ev;
+
+ mutex_lock(&p->event_mutex);
+
+ ev = lookup_event_by_id(p, event_id);
+
+ if (ev && event_can_be_cpu_signaled(ev))
+ reset_event(ev);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&p->event_mutex);
+ return ret;
+}
+
+static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
+{
+ page_slots(ev->signal_page)[ev->signal_slot_index] =
+ UNSIGNALED_EVENT_SLOT;
+}
+
+static bool is_slot_signaled(struct signal_page *page, unsigned int index)
+{
+ return page_slots(page)[index] != UNSIGNALED_EVENT_SLOT;
+}
+
+static void set_event_from_interrupt(struct kfd_process *p,
+ struct kfd_event *ev)
+{
+ if (ev && event_can_be_gpu_signaled(ev)) {
+ acknowledge_signal(p, ev);
+ set_event(ev);
+ }
+}
+
+void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
+ uint32_t valid_id_bits)
+{
+ struct kfd_event *ev;
+
+ /*
+ * Because we are called from arbitrary context (workqueue) as opposed
+ * to process context, kfd_process could attempt to exit while we are
+ * running so the lookup function returns a locked process.
+ */
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return; /* Presumably process exited. */
+
+ mutex_lock(&p->event_mutex);
+
+ if (valid_id_bits >= INTERRUPT_DATA_BITS) {
+ /* Partial ID is a full ID. */
+ ev = lookup_event_by_id(p, partial_id);
+ set_event_from_interrupt(p, ev);
+ } else {
+ /*
+ * Partial ID is in fact partial. For now we completely
+ * ignore it, but we could use any bits we did receive to
+ * search faster.
+ */
+ struct signal_page *page;
+ unsigned i;
+
+ list_for_each_entry(page, &p->signal_event_pages, event_pages)
+ for (i = 0; i < SLOTS_PER_PAGE; i++)
+ if (is_slot_signaled(page, i)) {
+ ev = lookup_event_by_page_slot(p,
+ page, i);
+ set_event_from_interrupt(p, ev);
+ }
+ }
+
+ mutex_unlock(&p->event_mutex);
+ mutex_unlock(&p->mutex);
+}
+
+static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
+{
+ struct kfd_event_waiter *event_waiters;
+ uint32_t i;
+
+ event_waiters = kmalloc_array(num_events,
+ sizeof(struct kfd_event_waiter),
+ GFP_KERNEL);
+
+ for (i = 0; (event_waiters) && (i < num_events) ; i++) {
+ INIT_LIST_HEAD(&event_waiters[i].waiters);
+ event_waiters[i].sleeping_task = current;
+ event_waiters[i].activated = false;
+ }
+
+ return event_waiters;
+}
+
+static int init_event_waiter(struct kfd_process *p,
+ struct kfd_event_waiter *waiter,
+ uint32_t event_id,
+ uint32_t input_index)
+{
+ struct kfd_event *ev = lookup_event_by_id(p, event_id);
+
+ if (!ev)
+ return -EINVAL;
+
+ waiter->event = ev;
+ waiter->input_index = input_index;
+ waiter->activated = ev->signaled;
+ ev->signaled = ev->signaled && !ev->auto_reset;
+
+ list_add(&waiter->waiters, &ev->waiters);
+
+ return 0;
+}
+
+static bool test_event_condition(bool all, uint32_t num_events,
+ struct kfd_event_waiter *event_waiters)
+{
+ uint32_t i;
+ uint32_t activated_count = 0;
+
+ for (i = 0; i < num_events; i++) {
+ if (event_waiters[i].activated) {
+ if (!all)
+ return true;
+
+ activated_count++;
+ }
+ }
+
+ return activated_count == num_events;
+}
+
+/*
+ * Copy event specific data, if defined.
+ * Currently only memory exception events have additional data to copy to user
+ */
+static bool copy_signaled_event_data(uint32_t num_events,
+ struct kfd_event_waiter *event_waiters,
+ struct kfd_event_data __user *data)
+{
+ struct kfd_hsa_memory_exception_data *src;
+ struct kfd_hsa_memory_exception_data __user *dst;
+ struct kfd_event_waiter *waiter;
+ struct kfd_event *event;
+ uint32_t i;
+
+ for (i = 0; i < num_events; i++) {
+ waiter = &event_waiters[i];
+ event = waiter->event;
+ if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
+ dst = &data[waiter->input_index].memory_exception_data;
+ src = &event->memory_exception_data;
+ if (copy_to_user(dst, src,
+ sizeof(struct kfd_hsa_memory_exception_data)))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
+{
+ if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
+ return 0;
+
+ if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
+ return MAX_SCHEDULE_TIMEOUT;
+
+ /*
+ * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
+ * but we consider them finite.
+ * This hack is wrong, but nobody is likely to notice.
+ */
+ user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);
+
+ return msecs_to_jiffies(user_timeout_ms) + 1;
+}
+
+static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
+{
+ uint32_t i;
+
+ for (i = 0; i < num_events; i++)
+ list_del(&waiters[i].waiters);
+
+ kfree(waiters);
+}
+
+int kfd_wait_on_events(struct kfd_process *p,
+ uint32_t num_events, void __user *data,
+ bool all, uint32_t user_timeout_ms,
+ enum kfd_event_wait_result *wait_result)
+{
+ struct kfd_event_data __user *events =
+ (struct kfd_event_data __user *) data;
+ uint32_t i;
+ int ret = 0;
+ struct kfd_event_waiter *event_waiters = NULL;
+ long timeout = user_timeout_to_jiffies(user_timeout_ms);
+
+ mutex_lock(&p->event_mutex);
+
+ event_waiters = alloc_event_waiters(num_events);
+ if (!event_waiters) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < num_events; i++) {
+ struct kfd_event_data event_data;
+
+ if (copy_from_user(&event_data, &events[i],
+ sizeof(struct kfd_event_data))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ ret = init_event_waiter(p, &event_waiters[i],
+ event_data.event_id, i);
+ if (ret)
+ goto fail;
+ }
+
+ mutex_unlock(&p->event_mutex);
+
+ while (true) {
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ /*
+ * This is wrong when a nonzero, non-infinite timeout
+ * is specified. We need to use
+ * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
+ * contains a union with data for each user and it's
+ * in generic kernel code that I don't want to
+ * touch yet.
+ */
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ if (test_event_condition(all, num_events, event_waiters)) {
+ if (copy_signaled_event_data(num_events,
+ event_waiters, events))
+ *wait_result = KFD_WAIT_COMPLETE;
+ else
+ *wait_result = KFD_WAIT_ERROR;
+ break;
+ }
+
+ if (timeout <= 0) {
+ *wait_result = KFD_WAIT_TIMEOUT;
+ break;
+ }
+
+ timeout = schedule_timeout_interruptible(timeout);
+ }
+ __set_current_state(TASK_RUNNING);
+
+ mutex_lock(&p->event_mutex);
+ free_waiters(num_events, event_waiters);
+ mutex_unlock(&p->event_mutex);
+
+ return ret;
+
+fail:
+ if (event_waiters)
+ free_waiters(num_events, event_waiters);
+
+ mutex_unlock(&p->event_mutex);
+
+ *wait_result = KFD_WAIT_ERROR;
+
+ return ret;
+}
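A minimal, hypothetical sketch of how a caller (presumably the wait-events ioctl in kfd_chardev.c, elsewhere in this patch) would drive kfd_wait_on_events(); the errno mapping of the wait result is illustrative only:

static int example_wait_any(struct kfd_process *p,
			    void __user *user_events, uint32_t num_events)
{
	enum kfd_event_wait_result wait_result;
	int err;

	err = kfd_wait_on_events(p, num_events, user_events,
				 false,	/* all = false: any signaled event wakes us */
				 1000,	/* user timeout in milliseconds */
				 &wait_result);
	if (err)
		return err;

	switch (wait_result) {
	case KFD_WAIT_COMPLETE:
		return 0;
	case KFD_WAIT_TIMEOUT:
		return -ETIME;
	default:
		return -EFAULT;	/* KFD_WAIT_ERROR */
	}
}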
+
+int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
+{
+ unsigned int page_index;
+ unsigned long pfn;
+ struct signal_page *page;
+
+ /* check that the requested mapping size is valid */
+ if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
+ get_order(vma->vm_end - vma->vm_start)) {
+ pr_err("amdkfd: event page mmap requested illegal size\n");
+ return -EINVAL;
+ }
+
+ page_index = vma->vm_pgoff;
+
+ page = lookup_signal_page_by_index(p, page_index);
+ if (!page) {
+ /* Probably KFD bug, but mmap is user-accessible. */
+ pr_debug("signal page could not be found for page_index %u\n",
+ page_index);
+ return -EINVAL;
+ }
+
+ pfn = __pa(page->kernel_address);
+ pfn >>= PAGE_SHIFT;
+
+ vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
+ | VM_DONTDUMP | VM_PFNMAP;
+
+ pr_debug("mapping signal page\n");
+ pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
+ pr_debug(" end user address == 0x%08lx\n", vma->vm_end);
+ pr_debug(" pfn == 0x%016lX\n", pfn);
+ pr_debug(" vm_flags == 0x%08lX\n", vma->vm_flags);
+ pr_debug(" size == 0x%08lX\n",
+ vma->vm_end - vma->vm_start);
+
+ page->user_address = (uint64_t __user *)vma->vm_start;
+
+ /* mapping the page to user process */
+ return remap_pfn_range(vma, vma->vm_start, pfn,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+/*
+ * Assumes that p->event_mutex is held and of course
+ * that p is not going away (current or locked).
+ */
+static void lookup_events_by_type_and_signal(struct kfd_process *p,
+ int type, void *event_data)
+{
+ struct kfd_hsa_memory_exception_data *ev_data;
+ struct kfd_event *ev;
+ int bkt;
+ bool send_signal = true;
+
+ ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
+
+ hash_for_each(p->events, bkt, ev, events)
+ if (ev->type == type) {
+ send_signal = false;
+ dev_dbg(kfd_device,
+ "Event found: id %X type %d",
+ ev->event_id, ev->type);
+ set_event(ev);
+ if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
+ ev->memory_exception_data = *ev_data;
+ }
+
+ /* Send SIGTERM if no event of type "type" has been found */
+ if (send_signal) {
+ if (send_sigterm) {
+ dev_warn(kfd_device,
+ "Sending SIGTERM to HSA Process with PID %d ",
+ p->lead_thread->pid);
+ send_sig(SIGTERM, p->lead_thread, 0);
+ } else {
+ dev_err(kfd_device,
+ "HSA Process (PID %d) got unhandled exception",
+ p->lead_thread->pid);
+ }
+ }
+}
+
+void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
+ unsigned long address, bool is_write_requested,
+ bool is_execute_requested)
+{
+ struct kfd_hsa_memory_exception_data memory_exception_data;
+ struct vm_area_struct *vma;
+
+ /*
+ * Because we are called from arbitrary context (workqueue) as opposed
+ * to process context, kfd_process could attempt to exit while we are
+ * running so the lookup function returns a locked process.
+ */
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return; /* Presumably process exited. */
+
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+
+ down_read(&p->mm->mmap_sem);
+ vma = find_vma(p->mm, address);
+
+ memory_exception_data.gpu_id = dev->id;
+ memory_exception_data.va = address;
+ /* Set failure reason */
+ memory_exception_data.failure.NotPresent = 1;
+ memory_exception_data.failure.NoExecute = 0;
+ memory_exception_data.failure.ReadOnly = 0;
+ if (vma) {
+ if (vma->vm_start > address) {
+ memory_exception_data.failure.NotPresent = 1;
+ memory_exception_data.failure.NoExecute = 0;
+ memory_exception_data.failure.ReadOnly = 0;
+ } else {
+ memory_exception_data.failure.NotPresent = 0;
+ if (is_write_requested && !(vma->vm_flags & VM_WRITE))
+ memory_exception_data.failure.ReadOnly = 1;
+ else
+ memory_exception_data.failure.ReadOnly = 0;
+ if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
+ memory_exception_data.failure.NoExecute = 1;
+ else
+ memory_exception_data.failure.NoExecute = 0;
+ }
+ }
+
+ up_read(&p->mm->mmap_sem);
+
+ mutex_lock(&p->event_mutex);
+
+ /* Lookup events by type and signal them */
+ lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
+ &memory_exception_data);
+
+ mutex_unlock(&p->event_mutex);
+ mutex_unlock(&p->mutex);
+}
+
+void kfd_signal_hw_exception_event(unsigned int pasid)
+{
+ /*
+ * Because we are called from arbitrary context (workqueue) as opposed
+ * to process context, kfd_process could attempt to exit while we are
+ * running so the lookup function returns a locked process.
+ */
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return; /* Presumably process exited. */
+
+ mutex_lock(&p->event_mutex);
+
+ /* Lookup events by type and signal them */
+ lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
+
+ mutex_unlock(&p->event_mutex);
+ mutex_unlock(&p->mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
new file mode 100644
index 000000000000..28f6838b1f4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_EVENTS_H_INCLUDED
+#define KFD_EVENTS_H_INCLUDED
+
+#include <linux/kernel.h>
+#include <linux/hashtable.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include "kfd_priv.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+#define KFD_EVENT_ID_NONSIGNAL_MASK 0x80000000U
+#define KFD_FIRST_NONSIGNAL_EVENT_ID KFD_EVENT_ID_NONSIGNAL_MASK
+#define KFD_LAST_NONSIGNAL_EVENT_ID UINT_MAX
+
+/*
+ * Written into kfd_signal_slot_t to indicate that the event is not signaled.
+ * Since the event protocol may need to write the event ID into memory, this
+ * must not be a valid event ID.
+ * For the sake of easy memset-ing, this must be a byte pattern.
+ */
+#define UNSIGNALED_EVENT_SLOT ((uint64_t)-1)
+
+struct kfd_event_waiter;
+struct signal_page;
+
+struct kfd_event {
+ /* All events in process, rooted at kfd_process.events. */
+ struct hlist_node events;
+
+ u32 event_id;
+
+ bool signaled;
+ bool auto_reset;
+
+ int type;
+
+ struct list_head waiters; /* List of kfd_event_waiter, linked via their waiters field. */
+
+ /* Only for signal events. */
+ struct signal_page *signal_page;
+ unsigned int signal_slot_index;
+ uint64_t __user *user_signal_address;
+
+ /* type specific data */
+ union {
+ struct kfd_hsa_memory_exception_data memory_exception_data;
+ };
+};
+
+#define KFD_EVENT_TIMEOUT_IMMEDIATE 0
+#define KFD_EVENT_TIMEOUT_INFINITE 0xFFFFFFFFu
+
+/* Matching HSA_EVENTTYPE */
+#define KFD_EVENT_TYPE_SIGNAL 0
+#define KFD_EVENT_TYPE_HW_EXCEPTION 3
+#define KFD_EVENT_TYPE_DEBUG 5
+#define KFD_EVENT_TYPE_MEMORY 8
+
+extern void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
+ uint32_t valid_id_bits);
+
+#endif
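As a reading aid for the constants above (not part of the header): IDs below KFD_EVENT_ID_NONSIGNAL_MASK identify signal events, while IDs from KFD_FIRST_NONSIGNAL_EVENT_ID through KFD_LAST_NONSIGNAL_EVENT_ID are handed out to non-signal events. A hypothetical helper expressing that split:

static inline bool kfd_event_id_is_signal(u32 event_id)
{
	/* Signal event IDs sit below the non-signal mask. */
	return (event_id & KFD_EVENT_ID_NONSIGNAL_MASK) == 0;
}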
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
new file mode 100644
index 000000000000..7f134aa9bfd3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * KFD Interrupts.
+ *
+ * AMD GPUs deliver interrupts by pushing an interrupt description onto the
+ * interrupt ring and then sending an interrupt. KGD receives the interrupt
+ * in ISR and sends us a pointer to each new entry on the interrupt ring.
+ *
+ * We generally can't process interrupt-signaled events from ISR, so we call
+ * out to each interrupt client module (currently only the scheduler) to ask if
+ * each interrupt is interesting. If they return true, then it requires further
+ * processing so we copy it to an internal interrupt ring and call each
+ * interrupt client again from a work-queue.
+ *
+ * There's no acknowledgment for the interrupts we use. The hardware simply
+ * queues a new interrupt each time without waiting.
+ *
+ * The fixed-size internal queue means that it's possible for us to lose
+ * interrupts because we have no back-pressure to the hardware.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include "kfd_priv.h"
+
+#define KFD_INTERRUPT_RING_SIZE 1024
+
+static void interrupt_wq(struct work_struct *);
+
+int kfd_interrupt_init(struct kfd_dev *kfd)
+{
+ void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
+ kfd->device_info->ih_ring_entry_size,
+ GFP_KERNEL);
+ if (!interrupt_ring)
+ return -ENOMEM;
+
+ kfd->interrupt_ring = interrupt_ring;
+ kfd->interrupt_ring_size =
+ KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
+ atomic_set(&kfd->interrupt_ring_wptr, 0);
+ atomic_set(&kfd->interrupt_ring_rptr, 0);
+
+ spin_lock_init(&kfd->interrupt_lock);
+
+ INIT_WORK(&kfd->interrupt_work, interrupt_wq);
+
+ kfd->interrupts_active = true;
+
+ /*
+ * After this function returns, the interrupt will be enabled. This
+ * barrier ensures that the interrupt running on a different processor
+ * sees all the above writes.
+ */
+ smp_wmb();
+
+ return 0;
+}
+
+void kfd_interrupt_exit(struct kfd_dev *kfd)
+{
+ /*
+ * Stop the interrupt handler from writing to the ring and scheduling
+ * workqueue items. The spinlock ensures that any interrupt running
+ * after we have unlocked sees interrupts_active = false.
+ */
+ unsigned long flags;
+
+ spin_lock_irqsave(&kfd->interrupt_lock, flags);
+ kfd->interrupts_active = false;
+ spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
+
+ /*
+ * flush_scheduled_work() ensures that there are no outstanding
+ * work-queue items that will access interrupt_ring. New work items
+ * can't be created because we stopped interrupt handling above.
+ */
+ flush_scheduled_work();
+
+ kfree(kfd->interrupt_ring);
+}
+
+/*
+ * This assumes that it can't be called concurrently with itself
+ * but only with dequeue_ih_ring_entry.
+ */
+bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+ unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+ unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+
+ if ((rptr - wptr) % kfd->interrupt_ring_size ==
+ kfd->device_info->ih_ring_entry_size) {
+ /* This is very bad, the system is likely to hang. */
+ dev_err_ratelimited(kfd_chardev(),
+ "Interrupt ring overflow, dropping interrupt.\n");
+ return false;
+ }
+
+ memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
+ kfd->device_info->ih_ring_entry_size);
+
+ wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
+ kfd->interrupt_ring_size;
+ smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
+ atomic_set(&kfd->interrupt_ring_wptr, wptr);
+
+ return true;
+}
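
A worked example of the overflow check above: refusing to enqueue when (rptr - wptr) % interrupt_ring_size equals a single ih_ring_entry_size keeps one slot permanently unused, so wptr can never catch up with rptr and make a full ring indistinguishable from an empty one; with KFD_INTERRUPT_RING_SIZE of 1024, the SW ring therefore holds at most 1023 outstanding entries.
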
+
+/*
+ * This assumes that it can't be called concurrently with itself
+ * but only with enqueue_ih_ring_entry.
+ */
+static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
+{
+ /*
+ * Assume that work queues have an implicit barrier, i.e. anything that
+ * happened in the ISR before it queued work is visible.
+ */
+
+ unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+ unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+
+ if (rptr == wptr)
+ return false;
+
+ memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
+ kfd->device_info->ih_ring_entry_size);
+
+ rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
+ kfd->interrupt_ring_size;
+
+ /*
+ * Ensure the rptr write update is not visible until
+ * memcpy has finished reading.
+ */
+ smp_mb();
+ atomic_set(&kfd->interrupt_ring_rptr, rptr);
+
+ return true;
+}
+
+static void interrupt_wq(struct work_struct *work)
+{
+ struct kfd_dev *dev = container_of(work, struct kfd_dev,
+ interrupt_work);
+
+ uint32_t ih_ring_entry[DIV_ROUND_UP(
+ dev->device_info->ih_ring_entry_size,
+ sizeof(uint32_t))];
+
+ while (dequeue_ih_ring_entry(dev, ih_ring_entry))
+ dev->device_info->event_interrupt_class->interrupt_wq(dev,
+ ih_ring_entry);
+}
+
+bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
+{
+ /* integer and bitwise OR so there is no boolean short-circuiting */
+ unsigned wanted = 0;
+
+ wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
+ ih_ring_entry);
+
+ return wanted != 0;
+}
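
The ISR-side caller of interrupt_is_wanted() and enqueue_ih_ring_entry() lives in kfd_device.c and is not shown in this excerpt. A minimal sketch of how the helpers are meant to be chained, following the locking comments above; the function name matches the kgd2kfd_interrupt() declaration in kfd_priv.h, but the exact body here is an assumption:

void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	/* Forward only interrupts a client wants and that fit in the SW
	 * ring; the work item drains the ring outside ISR context. */
	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry)
	    && enqueue_ih_ring_entry(kfd, ih_ring_entry))
		schedule_work(&kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
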
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index c7d298e62c96..8fa894100290 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -215,8 +215,9 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
queue_address = (unsigned int *)kq->pq_kernel_addr;
queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
- pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
- __func__, rptr, wptr, queue_address);
+ pr_debug("rptr: %d\n", rptr);
+ pr_debug("wptr: %d\n", wptr);
+ pr_debug("queue_address 0x%p\n", queue_address);
available_size = (rptr - 1 - wptr + queue_size_dwords) %
queue_size_dwords;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 4e0a68f13a77..ca8410e8683d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -29,10 +29,10 @@
#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs"
-#define KFD_DRIVER_DATE "20150122"
+#define KFD_DRIVER_DATE "20150421"
#define KFD_DRIVER_MAJOR 0
#define KFD_DRIVER_MINOR 7
-#define KFD_DRIVER_PATCHLEVEL 1
+#define KFD_DRIVER_PATCHLEVEL 2
static const struct kgd2kfd_calls kgd2kfd = {
.exit = kgd2kfd_exit,
@@ -54,6 +54,11 @@ module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
+int send_sigterm;
+module_param(send_sigterm, int, 0444);
+MODULE_PARM_DESC(send_sigterm,
+ "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
+
bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
{
/*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index e2533d875f43..99b6d28a11c3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -163,7 +163,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
num_queues = 0;
list_for_each_entry(cur, &qpd->queues_list, list)
num_queues++;
- packet->bitfields10.num_queues = num_queues;
+ packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : num_queues;
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
@@ -177,9 +177,10 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
}
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
- struct queue *q)
+ struct queue *q, bool is_static)
{
struct pm4_map_queues *packet;
+ bool use_static = is_static;
BUG_ON(!pm || !buffer || !q);
@@ -209,6 +210,7 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
case KFD_QUEUE_TYPE_SDMA:
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__sdma0;
+ use_static = false; /* no static queues under SDMA */
break;
default:
BUG();
@@ -218,6 +220,9 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
q->properties.doorbell_off;
+ packet->mes_map_queues_ordinals[0].bitfields3.is_static =
+ use_static ? 1 : 0;
+
packet->mes_map_queues_ordinals[0].mqd_addr_lo =
lower_32_bits(q->gart_mqd_addr);
@@ -271,9 +276,11 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pm_release_ib(pm);
return -ENOMEM;
}
+
retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
if (retval != 0)
return retval;
+
proccesses_mapped++;
inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
alloc_size_bytes);
@@ -281,23 +288,36 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
list_for_each_entry(kq, &qpd->priv_queue_list, list) {
if (kq->queue->properties.is_active != true)
continue;
+
+ pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
+ kq->queue->queue, qpd->is_debug);
+
retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
- kq->queue);
+ kq->queue, qpd->is_debug);
if (retval != 0)
return retval;
- inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
- alloc_size_bytes);
+
+ inc_wptr(&rl_wptr,
+ sizeof(struct pm4_map_queues),
+ alloc_size_bytes);
}
list_for_each_entry(q, &qpd->queues_list, list) {
if (q->properties.is_active != true)
continue;
- retval = pm_create_map_queue(pm,
- &rl_buffer[rl_wptr], q);
+
+ pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
+ q->queue, qpd->is_debug);
+
+ retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
+ q, qpd->is_debug);
+
if (retval != 0)
return retval;
- inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
- alloc_size_bytes);
+
+ inc_wptr(&rl_wptr,
+ sizeof(struct pm4_map_queues),
+ alloc_size_bytes);
}
}
@@ -488,7 +508,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
packet = (struct pm4_unmap_queues *)buffer;
memset(buffer, 0, sizeof(struct pm4_unmap_queues));
-
+ pr_debug("kfd: static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
+ mode, reset, type);
packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
sizeof(struct pm4_unmap_queues));
switch (type) {
@@ -529,6 +550,11 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
break;
+ case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
+ /* in this case, we do not preempt static queues */
+ packet->bitfields2.queue_sel =
+ queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only;
+ break;
default:
BUG();
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
index 071ad5724bd2..5b393f3e34a9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
@@ -237,7 +237,8 @@ struct pm4_map_queues {
struct {
union {
struct {
- uint32_t reserved5:2;
+ uint32_t is_static:1;
+ uint32_t reserved5:1;
uint32_t doorbell_offset:21;
uint32_t reserved6:3;
uint32_t queue:6;
@@ -328,7 +329,8 @@ enum unmap_queues_action_enum {
enum unmap_queues_queue_sel_enum {
queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
- queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2
+ queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2,
+ queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only = 3
};
enum unmap_queues_engine_sel_enum {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_diq.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_diq.h
new file mode 100644
index 000000000000..a0ff34878163
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_diq.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef KFD_PM4_HEADERS_DIQ_H_
+#define KFD_PM4_HEADERS_DIQ_H_
+
+/*--------------------_INDIRECT_BUFFER-------------------- */
+
+#ifndef _PM4__INDIRECT_BUFFER_DEFINED
+#define _PM4__INDIRECT_BUFFER_DEFINED
+enum _INDIRECT_BUFFER_cache_policy_enum {
+ cache_policy___indirect_buffer__lru = 0,
+ cache_policy___indirect_buffer__stream = 1,
+ cache_policy___indirect_buffer__bypass = 2
+};
+
+enum {
+ IT_INDIRECT_BUFFER_PASID = 0x5C
+};
+
+struct pm4__indirect_buffer_pasid {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ unsigned int ordinal1;
+ };
+
+ union {
+ struct {
+ unsigned int reserved1:2;
+ unsigned int ib_base_lo:30;
+ } bitfields2;
+ unsigned int ordinal2;
+ };
+
+ union {
+ struct {
+ unsigned int ib_base_hi:16;
+ unsigned int reserved2:16;
+ } bitfields3;
+ unsigned int ordinal3;
+ };
+
+ union {
+ unsigned int control;
+ unsigned int ordinal4;
+ };
+
+ union {
+ struct {
+ unsigned int pasid:10;
+ unsigned int reserved4:22;
+ } bitfields5;
+ unsigned int ordinal5;
+ };
+
+};
+
+#endif
+
+/*--------------------_RELEASE_MEM-------------------- */
+
+#ifndef _PM4__RELEASE_MEM_DEFINED
+#define _PM4__RELEASE_MEM_DEFINED
+enum _RELEASE_MEM_event_index_enum {
+ event_index___release_mem__end_of_pipe = 5,
+ event_index___release_mem__shader_done = 6
+};
+
+enum _RELEASE_MEM_cache_policy_enum {
+ cache_policy___release_mem__lru = 0,
+ cache_policy___release_mem__stream = 1,
+ cache_policy___release_mem__bypass = 2
+};
+
+enum _RELEASE_MEM_dst_sel_enum {
+ dst_sel___release_mem__memory_controller = 0,
+ dst_sel___release_mem__tc_l2 = 1,
+ dst_sel___release_mem__queue_write_pointer_register = 2,
+ dst_sel___release_mem__queue_write_pointer_poll_mask_bit = 3
+};
+
+enum _RELEASE_MEM_int_sel_enum {
+ int_sel___release_mem__none = 0,
+ int_sel___release_mem__send_interrupt_only = 1,
+ int_sel___release_mem__send_interrupt_after_write_confirm = 2,
+ int_sel___release_mem__send_data_after_write_confirm = 3
+};
+
+enum _RELEASE_MEM_data_sel_enum {
+ data_sel___release_mem__none = 0,
+ data_sel___release_mem__send_32_bit_low = 1,
+ data_sel___release_mem__send_64_bit_data = 2,
+ data_sel___release_mem__send_gpu_clock_counter = 3,
+ data_sel___release_mem__send_cp_perfcounter_hi_lo = 4,
+ data_sel___release_mem__store_gds_data_to_memory = 5
+};
+
+struct pm4__release_mem {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /*header */
+ unsigned int ordinal1;
+ };
+
+ union {
+ struct {
+ unsigned int event_type:6;
+ unsigned int reserved1:2;
+ enum _RELEASE_MEM_event_index_enum event_index:4;
+ unsigned int tcl1_vol_action_ena:1;
+ unsigned int tc_vol_action_ena:1;
+ unsigned int reserved2:1;
+ unsigned int tc_wb_action_ena:1;
+ unsigned int tcl1_action_ena:1;
+ unsigned int tc_action_ena:1;
+ unsigned int reserved3:6;
+ unsigned int atc:1;
+ enum _RELEASE_MEM_cache_policy_enum cache_policy:2;
+ unsigned int reserved4:5;
+ } bitfields2;
+ unsigned int ordinal2;
+ };
+
+ union {
+ struct {
+ unsigned int reserved5:16;
+ enum _RELEASE_MEM_dst_sel_enum dst_sel:2;
+ unsigned int reserved6:6;
+ enum _RELEASE_MEM_int_sel_enum int_sel:3;
+ unsigned int reserved7:2;
+ enum _RELEASE_MEM_data_sel_enum data_sel:3;
+ } bitfields3;
+ unsigned int ordinal3;
+ };
+
+ union {
+ struct {
+ unsigned int reserved8:2;
+ unsigned int address_lo_32b:30;
+ } bitfields4;
+ struct {
+ unsigned int reserved9:3;
+ unsigned int address_lo_64b:29;
+ } bitfields5;
+ unsigned int ordinal4;
+ };
+
+ unsigned int address_hi;
+
+ unsigned int data_lo;
+
+ unsigned int data_hi;
+
+};
+#endif
+
+
+/*--------------------_SET_CONFIG_REG-------------------- */
+
+#ifndef _PM4__SET_CONFIG_REG_DEFINED
+#define _PM4__SET_CONFIG_REG_DEFINED
+
+struct pm4__set_config_reg {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /*header */
+ unsigned int ordinal1;
+ };
+
+ union {
+ struct {
+ unsigned int reg_offset:16;
+ unsigned int reserved1:7;
+ unsigned int vmid_shift:5;
+ unsigned int insert_vmid:1;
+ unsigned int reserved2:3;
+ } bitfields2;
+ unsigned int ordinal2;
+ };
+
+ unsigned int reg_data[1]; /*1..N of these fields */
+
+};
+#endif
+
+/*--------------------_WAIT_REG_MEM-------------------- */
+
+#ifndef _PM4__WAIT_REG_MEM_DEFINED
+#define _PM4__WAIT_REG_MEM_DEFINED
+enum _WAIT_REG_MEM_function_enum {
+ function___wait_reg_mem__always_pass = 0,
+ function___wait_reg_mem__less_than_ref_value = 1,
+ function___wait_reg_mem__less_than_equal_to_the_ref_value = 2,
+ function___wait_reg_mem__equal_to_the_reference_value = 3,
+ function___wait_reg_mem__not_equal_reference_value = 4,
+ function___wait_reg_mem__greater_than_or_equal_reference_value = 5,
+ function___wait_reg_mem__greater_than_reference_value = 6,
+ function___wait_reg_mem__reserved = 7
+};
+
+enum _WAIT_REG_MEM_mem_space_enum {
+ mem_space___wait_reg_mem__register_space = 0,
+ mem_space___wait_reg_mem__memory_space = 1
+};
+
+enum _WAIT_REG_MEM_operation_enum {
+ operation___wait_reg_mem__wait_reg_mem = 0,
+ operation___wait_reg_mem__wr_wait_wr_reg = 1
+};
+
+struct pm4__wait_reg_mem {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /*header */
+ unsigned int ordinal1;
+ };
+
+ union {
+ struct {
+ enum _WAIT_REG_MEM_function_enum function:3;
+ unsigned int reserved1:1;
+ enum _WAIT_REG_MEM_mem_space_enum mem_space:2;
+ enum _WAIT_REG_MEM_operation_enum operation:2;
+ unsigned int reserved2:24;
+ } bitfields2;
+ unsigned int ordinal2;
+ };
+
+ union {
+ struct {
+ unsigned int reserved3:2;
+ unsigned int memory_poll_addr_lo:30;
+ } bitfields3;
+ struct {
+ unsigned int register_poll_addr:16;
+ unsigned int reserved4:16;
+ } bitfields4;
+ struct {
+ unsigned int register_write_addr:16;
+ unsigned int reserved5:16;
+ } bitfields5;
+ unsigned int ordinal3;
+ };
+
+ union {
+ struct {
+ unsigned int poll_address_hi:16;
+ unsigned int reserved6:16;
+ } bitfields6;
+ struct {
+ unsigned int register_write_addr:16;
+ unsigned int reserved7:16;
+ } bitfields7;
+ unsigned int ordinal4;
+ };
+
+ unsigned int reference;
+
+ unsigned int mask;
+
+ union {
+ struct {
+ unsigned int poll_interval:16;
+ unsigned int reserved8:16;
+ } bitfields8;
+ unsigned int ordinal7;
+ };
+
+};
+#endif
+
+
+#endif /* KFD_PM4_HEADERS_DIQ_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f21fccebd75b..d0d5f4baf72d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -35,6 +35,9 @@
#define KFD_SYSFS_FILE_MODE 0444
+#define KFD_MMAP_DOORBELL_MASK 0x8000000000000
+#define KFD_MMAP_EVENTS_MASK 0x4000000000000
+
/*
* When working with cp scheduler we should assign the HIQ manually or via
* the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot
@@ -71,6 +74,12 @@ extern int max_num_of_queues_per_device;
/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;
+/*
+ * Kernel module parameter to specify whether to send sigterm to HSA process on
+ * unhandled exception
+ */
+extern int send_sigterm;
+
/**
* enum kfd_sched_policy
*
@@ -108,9 +117,18 @@ enum asic_family_type {
CHIP_CARRIZO
};
+struct kfd_event_interrupt_class {
+ bool (*interrupt_isr)(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry);
+ void (*interrupt_wq)(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry);
+};
+
struct kfd_device_info {
unsigned int asic_family;
+ const struct kfd_event_interrupt_class *event_interrupt_class;
unsigned int max_pasid_bits;
+ unsigned int max_no_of_hqd;
size_t ih_ring_entry_size;
uint8_t num_of_watch_points;
uint16_t mqd_size_aligned;
@@ -150,8 +168,8 @@ struct kfd_dev {
const struct kfd2kgd_calls *kfd2kgd;
struct mutex doorbell_mutex;
- unsigned long doorbell_available_index[DIV_ROUND_UP(
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
+ DECLARE_BITMAP(doorbell_available_index,
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
void *gtt_mem;
uint64_t gtt_start_gpu_addr;
@@ -161,10 +179,26 @@ struct kfd_dev {
unsigned int gtt_sa_chunk_size;
unsigned int gtt_sa_num_of_chunks;
+ /* Interrupts */
+ void *interrupt_ring;
+ size_t interrupt_ring_size;
+ atomic_t interrupt_ring_rptr;
+ atomic_t interrupt_ring_wptr;
+ struct work_struct interrupt_work;
+ spinlock_t interrupt_lock;
+
/* QCM Device instance */
struct device_queue_manager *dqm;
bool init_complete;
+ /*
+ * Interrupts of interest to KFD are copied
+ * from the HW ring into a SW ring.
+ */
+ bool interrupts_active;
+
+ /* Debug manager */
+ struct kfd_dbgmgr *dbgmgr;
};
/* KGD2KFD callbacks */
@@ -201,6 +235,7 @@ struct device *kfd_chardev(void);
enum kfd_preempt_type_filter {
KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
+ KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES,
KFD_PREEMPT_TYPE_FILTER_BY_PASID
};
@@ -428,6 +463,11 @@ struct kfd_process_device {
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
bool bound;
+
+ /* This flag tells whether we should reset all
+ * wavefronts on process termination
+ */
+ bool reset_wavefronts;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -473,10 +513,17 @@ struct kfd_process {
/* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
struct kfd_queue **queues;
- unsigned long allocated_queue_bitmap[DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
-
/*Is the user space process 32 bit?*/
bool is_32bit_user_mode;
+
+ /* Event-related data */
+ struct mutex event_mutex;
+ /* All events in process hashed by ID, linked on kfd_event.events. */
+ DECLARE_HASHTABLE(events, 4);
+ struct list_head signal_event_pages; /* struct slot_page_header.event_pages */
+ u32 next_nonsignal_event_id;
+ size_t signal_event_count;
};
/**
@@ -501,6 +548,7 @@ void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);
struct kfd_process *kfd_get_process(const struct task_struct *);
+struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
struct kfd_process *p);
@@ -555,7 +603,11 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
/* Interrupts */
+int kfd_interrupt_init(struct kfd_dev *dev);
+void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
+bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
+bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);
/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
@@ -606,6 +658,12 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
+struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
+ unsigned int qid);
+
+int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ unsigned int fence_value,
+ unsigned long timeout);
/* Packet Manager */
@@ -642,4 +700,37 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
struct kfd_process *process);
+/* Events */
+extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
+extern const struct kfd_device_global_init_class device_global_init_class_cik;
+
+enum kfd_event_wait_result {
+ KFD_WAIT_COMPLETE,
+ KFD_WAIT_TIMEOUT,
+ KFD_WAIT_ERROR
+};
+
+void kfd_event_init_process(struct kfd_process *p);
+void kfd_event_free_process(struct kfd_process *p);
+int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
+int kfd_wait_on_events(struct kfd_process *p,
+ uint32_t num_events, void __user *data,
+ bool all, uint32_t user_timeout_ms,
+ enum kfd_event_wait_result *wait_result);
+void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
+ uint32_t valid_id_bits);
+void kfd_signal_iommu_event(struct kfd_dev *dev,
+ unsigned int pasid, unsigned long address,
+ bool is_write_requested, bool is_execute_requested);
+void kfd_signal_hw_exception_event(unsigned int pasid);
+int kfd_set_event(struct kfd_process *p, uint32_t event_id);
+int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
+int kfd_event_create(struct file *devkfd, struct kfd_process *p,
+ uint32_t event_type, bool auto_reset, uint32_t node_id,
+ uint32_t *event_id, uint32_t *event_trigger_data,
+ uint64_t *event_page_offset, uint32_t *event_slot_index);
+int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+
+int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 945d6226dc51..8a1f999daa24 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
struct mm_struct;
#include "kfd_priv.h"
+#include "kfd_dbgmgr.h"
/*
* Initial size for the array of queues.
@@ -172,12 +173,17 @@ static void kfd_process_wq_release(struct work_struct *work)
pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
pdd->dev->id, p->pasid);
+ if (pdd->reset_wavefronts)
+ dbgdev_wave_reset_wavefronts(pdd->dev, p);
+
amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
list_del(&pdd->per_device_list);
kfree(pdd);
}
+ kfd_event_free_process(p);
+
kfd_pasid_free(p->pasid);
mutex_unlock(&p->mutex);
@@ -203,8 +209,7 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
mmdrop(p->mm);
- work = (struct kfd_process_release_work *)
- kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
+ work = kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
if (work) {
INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
@@ -217,6 +222,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct kfd_process *p;
+ struct kfd_process_device *pdd = NULL;
/*
* The kfd_process structure can not be free because the
@@ -235,6 +241,15 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
/* In case our notifier is called before IOMMU notifier */
pqm_uninit(&p->pqm);
+ /* Iterate over all process device data structures and check
+ * if we should reset all wavefronts */
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ if (pdd->reset_wavefronts) {
+ pr_warn("amdkfd: Resetting all wave fronts\n");
+ dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ pdd->reset_wavefronts = false;
+ }
+
mutex_unlock(&p->mutex);
/*
@@ -289,6 +304,8 @@ static struct kfd_process *create_process(const struct task_struct *thread)
INIT_LIST_HEAD(&process->per_device_data);
+ kfd_event_init_process(process);
+
err = pqm_init(&process->pqm, process);
if (err != 0)
goto err_process_pqm_init;
@@ -339,6 +356,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
pdd->qpd.dqm = dev->dqm;
+ pdd->reset_wavefronts = false;
list_add(&pdd->per_device_list, &p->per_device_data);
}
@@ -396,9 +414,16 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
mutex_lock(&p->mutex);
+ if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+
pqm_uninit(&p->pqm);
pdd = kfd_get_process_device_data(dev, p);
+ if (pdd->reset_wavefronts) {
+ dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ pdd->reset_wavefronts = false;
+ }
/*
* Just mark pdd as unbound, because we still need it to call
@@ -431,3 +456,23 @@ bool kfd_has_process_device_data(struct kfd_process *p)
{
return !(list_empty(&p->per_device_data));
}
+
+/* This returns with process->mutex locked. */
+struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
+{
+ struct kfd_process *p;
+ unsigned int temp;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ if (p->pasid == pasid) {
+ mutex_lock(&p->mutex);
+ break;
+ }
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ return p;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 530b82c4e78b..7b69070f7ecc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -158,6 +158,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct queue *q;
struct process_queue_node *pqn;
struct kernel_queue *kq;
+ int num_queues = 0;
+ struct queue *cur;
BUG_ON(!pqm || !dev || !properties || !qid);
@@ -172,6 +174,20 @@ int pqm_create_queue(struct process_queue_manager *pqm,
return -1;
}
+ /*
+ * For a debug process, verify that it is within the static queues limit;
+ * currently the limit is set to half of the total available HQD slots.
+ * If we are just about to create the DIQ, the is_debug flag is not set
+ * yet, so we also check the queue type.
+ */
+ if ((pdd->qpd.is_debug) ||
+ (type == KFD_QUEUE_TYPE_DIQ)) {
+ list_for_each_entry(cur, &pdd->qpd.queues_list, list)
+ num_queues++;
+ if (num_queues >= dev->device_info->max_no_of_hqd / 2)
+ return -ENOSPC;
+ }
+
retval = find_available_queue_slot(pqm, qid);
if (retval != 0)
return retval;
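
For illustration only (the per-ASIC max_no_of_hqd value is set in kfd_device.c and is not shown in this excerpt): if a device reported max_no_of_hqd == 24, the check above would cap a debug process at 24 / 2 = 12 static queues, counting the DIQ, and the next create request would fail with -ENOSPC.
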
@@ -341,7 +357,7 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
return 0;
}
-static __attribute__((unused)) struct kernel_queue *pqm_get_kernel_queue(
+struct kernel_queue *pqm_get_kernel_queue(
struct process_queue_manager *pqm,
unsigned int qid)
{