path: root/drivers/gpu/drm/i915/display/intel_de.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_uncore.h"

static inline u32
intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
{
	return intel_uncore_read(&i915->uncore, reg);
}

static inline u8
intel_de_read8(struct drm_i915_private *i915, i915_reg_t reg)
{
	return intel_uncore_read8(&i915->uncore, reg);
}

static inline u64
intel_de_read64_2x32(struct drm_i915_private *i915,
		     i915_reg_t lower_reg, i915_reg_t upper_reg)
{
	return intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg);
}
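
/*
 * Posting read: read @reg back and discard the value, ensuring previously
 * posted writes have reached the hardware.
 */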

static inline void
intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
{
	intel_uncore_posting_read(&i915->uncore, reg);
}

static inline void
intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
{
	intel_uncore_write(&i915->uncore, reg, val);
}
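
/*
 * Read-modify-write helper: clear the bits in @clear, then set the bits in
 * @set; the return value comes straight from intel_uncore_rmw().
 */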

static inline u32
intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
{
	return intel_uncore_rmw(&i915->uncore, reg, clear, set);
}
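
/*
 * Poll helpers: wait until (read(@reg) & @mask) == @value or the timeout
 * expires, returning 0 on success and a negative error code on timeout,
 * as reported by the underlying intel_uncore wait routines.
 */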

static inline int
intel_de_wait(struct drm_i915_private *i915, i915_reg_t reg,
	      u32 mask, u32 value, unsigned int timeout)
{
	return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
}

static inline int
intel_de_wait_fw(struct drm_i915_private *i915, i915_reg_t reg,
		 u32 mask, u32 value, unsigned int timeout)
{
	return intel_wait_for_register_fw(&i915->uncore, reg, mask, value, timeout);
}

static inline int
intel_de_wait_custom(struct drm_i915_private *i915, i915_reg_t reg,
		     u32 mask, u32 value,
		     unsigned int fast_timeout_us,
		     unsigned int slow_timeout_ms, u32 *out_value)
{
	return __intel_wait_for_register(&i915->uncore, reg, mask, value,
					 fast_timeout_us, slow_timeout_ms, out_value);
}

static inline int
intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
		      u32 mask, unsigned int timeout)
{
	return intel_de_wait(i915, reg, mask, mask, timeout);
}

static inline int
intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
			u32 mask, unsigned int timeout)
{
	return intel_de_wait(i915, reg, mask, 0, timeout);
}

/*
 * Unlocked mmio-accessors, think carefully before using these.
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
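
/*
 * A minimal illustrative sketch (not from this file) of how a caller might
 * provide that serialisation, holding uncore.lock around a burst of _fw
 * accesses; SOME_DE_REG and SOME_DE_BIT are hypothetical placeholders:
 *
 *	unsigned long flags;
 *	u32 tmp;
 *
 *	spin_lock_irqsave(&i915->uncore.lock, flags);
 *	tmp = intel_de_read_fw(i915, SOME_DE_REG);
 *	intel_de_write_fw(i915, SOME_DE_REG, tmp | SOME_DE_BIT);
 *	spin_unlock_irqrestore(&i915->uncore.lock, flags);
 */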
static inline u32
intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
{
	u32 val;

	val = intel_uncore_read_fw(&i915->uncore, reg);
	trace_i915_reg_rw(false, reg, val, sizeof(val), true);

	return val;
}

static inline void
intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
{
	trace_i915_reg_rw(true, reg, val, sizeof(val), true);
	intel_uncore_write_fw(&i915->uncore, reg, val);
}
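
/*
 * The _notrace variants forward to the intel_uncore accessors that skip the
 * i915_reg_rw tracepoint.
 */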

static inline u32
intel_de_read_notrace(struct drm_i915_private *i915, i915_reg_t reg)
{
	return intel_uncore_read_notrace(&i915->uncore, reg);
}

static inline void
intel_de_write_notrace(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
{
	intel_uncore_write_notrace(&i915->uncore, reg, val);
}

#endif /* __INTEL_DE_H__ */