summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c41
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1
-rw-r--r--include/uapi/drm/i915_drm.h3
9 files changed, 68 insertions, 4 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0d67b17ceef0..122a4acb2580 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -495,9 +495,9 @@ struct drm_i915_error_state {
u32 userptr:1;
s32 ring:4;
u32 cache_level:3;
- } **active_bo, **pinned_bo;
+ } **active_bo, **pinned_bo, *requested_bo;
- u32 *active_bo_count, *pinned_bo_count;
+ u32 *active_bo_count, *pinned_bo_count, requested_bo_count;
u32 vm_count;
};
@@ -2154,6 +2154,8 @@ struct drm_i915_gem_request {
/** file_priv list entry for this request */
struct list_head client_list;
+ struct list_head error_list;
+
uint32_t uniq;
/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fc8188906116..9a6d268e78b7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2308,6 +2308,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+
+ list_del_init(&vma->error_collection_link);
}
intel_fb_obj_flush(obj, true);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b773368fc62c..56a2d2f45780 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -44,6 +44,7 @@
struct eb_vmas {
struct list_head vmas;
int and;
+ struct list_head error_collection_list; /* Temporary list */
union {
struct i915_vma *lut[0];
struct hlist_head buckets[0];
@@ -79,6 +80,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
eb->and = -args->buffer_count;
INIT_LIST_HEAD(&eb->vmas);
+ INIT_LIST_HEAD(&eb->error_collection_list);
return eb;
}
@@ -87,6 +89,7 @@ eb_reset(struct eb_vmas *eb)
{
if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+ list_del_init(&eb->error_collection_list);
}
static int
@@ -100,6 +103,9 @@ eb_lookup_vmas(struct eb_vmas *eb,
struct list_head objects;
int i, ret;
+ if (WARN_ON(!list_empty(&eb->error_collection_list)))
+ list_del_init(&eb->error_collection_list);
+
INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
/* Grab a reference to the object and release the lock so we can lookup
@@ -163,6 +169,10 @@ eb_lookup_vmas(struct eb_vmas *eb,
hlist_add_head(&vma->exec_node,
&eb->buckets[handle & eb->and]);
}
+
+ if (exec[i].flags & EXEC_OBJECT_COLLECT_ON_ERROR)
+ list_add_tail(&vma->error_collection_link,
+ &eb->error_collection_list);
++i;
}
@@ -1348,6 +1358,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
+ struct drm_i915_gem_request *req;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
@@ -1537,6 +1548,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
if (flags & I915_DISPATCH_SECURE)
i915_gem_object_ggtt_unpin(batch_obj);
+
+ req = intel_ring_get_request(ring);
+	list_splice_init(&eb->error_collection_list, &req->error_list);
+
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 746f77fb57a3..73a4b34d5f76 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2239,6 +2239,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->vma_link);
INIT_LIST_HEAD(&vma->mm_list);
INIT_LIST_HEAD(&vma->exec_list);
+ INIT_LIST_HEAD(&vma->error_collection_link);
vma->vm = vm;
vma->obj = obj;
vma->ggtt_view = *view;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index e377c7d27bd4..f6e29a023095 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -159,6 +159,8 @@ struct i915_vma {
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
+ struct list_head error_collection_link;
+
/**
* Used for performing relocations during execbuffer insertion.
*/
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 48ddbf44c862..979fe1976d46 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -407,6 +407,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->pinned_bo_count[i]);
}
+	if (error->requested_bo_count)
+		print_error_buffers(m, "Requested",
+				    error->requested_bo,
+				    error->requested_bo_count);
+
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
obj = error->ring[i].batchbuffer;
if (obj) {
@@ -1071,8 +1076,11 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
int i;
i = 0;
- list_for_each_entry(vma, &vm->active_list, mm_list)
+ list_for_each_entry(vma, &vm->active_list, mm_list) {
+ if (!list_empty(&vma->error_collection_link))
+ error->requested_bo_count++;
i++;
+ }
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -1140,6 +1148,36 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
}
}
+/* Capture the user-requested (EXEC_OBJECT_COLLECT_ON_ERROR) buffers
+ * from the active request of the hung ring into the error state.
+ * Note: if several rings are hung, only the last one's request wins. */
+static void i915_gem_capture_requested_objects(struct drm_i915_private *dev_priv,
+					       struct drm_i915_error_state *error)
+{
+	struct drm_i915_gem_request *req = NULL;
+	int i;
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
+		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG)
+			req = i915_gem_find_active_request(ring);
+	}
+
+	if (!req)
+		return;
+
+	/* Atomic context (error capture) - allocation may fail; bail out. */
+	error->requested_bo = kcalloc(error->requested_bo_count,
+				      sizeof(*error->requested_bo), GFP_ATOMIC);
+	if (!error->requested_bo)
+		return;
+
+	/* Bound by requested_bo_count in case the list grew since counting. */
+	for (i = 0; !list_empty(&req->error_list) &&
+		    i < error->requested_bo_count; i++) {
+		struct i915_vma *vma = list_first_entry(&req->error_list,
+							struct i915_vma,
+							error_collection_link);
+		list_del_init(&vma->error_collection_link);
+		capture_bo(&error->requested_bo[i], vma);
+	}
+}
+
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
@@ -1273,6 +1311,7 @@ void i915_capture_error_state(struct drm_device *dev, bool wedged,
i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
+ i915_gem_capture_requested_objects(dev_priv, error);
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 70e449b702cc..234f72dcf353 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -865,6 +865,7 @@ static int logical_ring_alloc_request(struct intel_engine_cs *ring,
}
}
+ INIT_LIST_HEAD(&request->error_list);
kref_init(&request->ref);
request->ring = ring;
request->uniq = dev_private->request_uniq++;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0bd3976d88e1..23fd2bfa8d07 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2095,6 +2095,7 @@ intel_ring_alloc_request(struct intel_engine_cs *ring)
if (request == NULL)
return -ENOMEM;
+ INIT_LIST_HEAD(&request->error_list);
kref_init(&request->ref);
request->ring = ring;
request->uniq = dev_private->request_uniq++;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 6eed16b92a24..9eb7247a4845 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -677,7 +677,8 @@ struct drm_i915_gem_exec_object2 {
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
+#define EXEC_OBJECT_COLLECT_ON_ERROR (1<<3)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_COLLECT_ON_ERROR<<1)
__u64 flags;
__u64 rsvd1;