path: root/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "mock_context.h"

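/*
 * igt_client_fill - exercise the client blt fill path
 *
 * Repeatedly create an internal object of random, page-aligned size,
 * poison it through a CPU write-back mapping, then schedule a blitter
 * fill of a random value. After waiting for the blt and moving the
 * object back to the CPU domain, verify that every dword holds the
 * fill value, i.e. that any clflushes required on !llc platforms were
 * correctly emitted.
 */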
static int igt_client_fill(void *arg)
{
	struct intel_context *ce = arg;
	struct drm_i915_private *i915 = ce->gem_context->i915;
	struct drm_i915_gem_object *obj;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

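	/* Loop with fresh random sizes/values until the selftest timeout. */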
	do {
		u32 sz = prandom_u32_state(&prng) % SZ_32M;
		u32 val = prandom_u32_state(&prng);
		u32 i;

		/*
		 * round_up() leaves a zero sz at zero; insist on at least
		 * one page so we never try to create an empty object.
		 */
		sz = max_t(u32, round_up(sz, PAGE_SIZE), PAGE_SIZE);

		pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);

		obj = i915_gem_object_create_internal(i915, sz);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put;
		}

		/*
		 * XXX: The goal is to move this to get_pages, so try to dirty
		 * the CPU cache first to check that we do the required
		 * clflush before scheduling the blt for !llc platforms. This
		 * matches some version of reality where at get_pages the
		 * pages themselves may not yet be coherent with the GPU
		 * (swap-in). If we are missing the flush then we should see
		 * the stale cache values after we do the set_to_cpu_domain
		 * and pick it up as a test failure.
		 */
		memset32(vaddr, val ^ 0xdeadbeef, obj->base.size / sizeof(u32));

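		/*
		 * Flag the poison writes as dirty so that the required
		 * clflush is emitted before the blt reads the pages.
		 */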
		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			obj->cache_dirty = true;

		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
						       &obj->mm.page_sizes,
						       val);
		if (err)
			goto err_unpin;

		/*
		 * XXX: For now do the wait without the object resv lock to
		 * ensure we don't deadlock.
		 */
		err = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT);
		if (err)
			goto err_unpin;

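		/*
		 * Move the object back to the CPU domain so that reads
		 * through the WB mapping observe the blitter's writes.
		 */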
		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_cpu_domain(obj, false);
		i915_gem_object_unlock(obj);
		if (err)
			goto err_unpin;

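		/* Every dword should now hold the blt fill value. */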
		for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(obj);
		i915_gem_object_put(obj);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
err_flush:
	mutex_lock(&i915->drm.struct_mutex);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

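	/*
	 * Object sizes of up to 32MiB make transient allocation failure
	 * plausible; treat -ENOMEM as a benign skip, not a test failure.
	 */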
	if (err == -ENOMEM)
		err = 0;

	return err;
}

int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_fill),
	};

	if (i915_terminally_wedged(i915))
		return 0;

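	/* The client fill blt is submitted to the blitter engine. */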
	if (!HAS_ENGINE(i915, BCS0))
		return 0;

	return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
}