path: root/test/CodeGen/ARM64/fp128.ll

; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone < %s | FileCheck %s
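; AArch64 has no native fp128 arithmetic instructions, so every operation in
; this file is expected to be lowered to the corresponding soft-float runtime
; libcall ("tf" = IEEE binary128), with the fp128 values themselves passed and
; returned in the q registers as the checks below show.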

@lhs = global fp128 zeroinitializer, align 16
@rhs = global fp128 zeroinitializer, align 16

define fp128 @test_add() {
; CHECK-LABEL: test_add:

  %lhs = load fp128* @lhs, align 16
  %rhs = load fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]

  %val = fadd fp128 %lhs, %rhs
; CHECK: bl __addtf3
  ret fp128 %val
}

define fp128 @test_sub() {
; CHECK-LABEL: test_sub:

  %lhs = load fp128* @lhs, align 16
  %rhs = load fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]

  %val = fsub fp128 %lhs, %rhs
; CHECK: bl __subtf3
  ret fp128 %val
}

define fp128 @test_mul() {
; CHECK-LABEL: test_mul:

  %lhs = load fp128* @lhs, align 16
  %rhs = load fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]

  %val = fmul fp128 %lhs, %rhs
; CHECK: bl __multf3
  ret fp128 %val
}

define fp128 @test_div() {
; CHECK-LABEL: test_div:

  %lhs = load fp128* @lhs, align 16
  %rhs = load fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]

  %val = fdiv fp128 %lhs, %rhs
; CHECK: bl __divtf3
  ret fp128 %val
}

@var32 = global i32 0
@var64 = global i64 0
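
; The conversion libcalls follow the usual soft-float naming scheme:
; __fix<from><to> for float-to-signed-int, __fixuns<from><to> for
; float-to-unsigned-int, and __float<from><to> / __floatun<from><to> for the
; int-to-float directions, where "tf" is fp128, "si" is i32 and "di" is i64.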

define void @test_fptosi() {
; CHECK-LABEL: test_fptosi:
  %val = load fp128* @lhs, align 16

  %val32 = fptosi fp128 %val to i32
  store i32 %val32, i32* @var32
; CHECK: bl __fixtfsi

  %val64 = fptosi fp128 %val to i64
  store i64 %val64, i64* @var64
; CHECK: bl __fixtfdi

  ret void
}

define void @test_fptoui() {
; CHECK-LABEL: test_fptoui:
  %val = load fp128* @lhs, align 16

  %val32 = fptoui fp128 %val to i32
  store i32 %val32, i32* @var32
; CHECK: bl __fixunstfsi

  %val64 = fptoui fp128 %val to i64
  store i64 %val64, i64* @var64
; CHECK: bl __fixunstfdi

  ret void
}

define void @test_sitofp() {
; CHECK-LABEL: test_sitofp:

  %src32 = load i32* @var32
  %val32 = sitofp i32 %src32 to fp128
  store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatsitf

  %src64 = load i64* @var64
  %val64 = sitofp i64 %src64 to fp128
  store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatditf

  ret void
}

define void @test_uitofp() {
; CHECK-LABEL: test_uitofp:

  %src32 = load i32* @var32
  %val32 = uitofp i32 %src32 to fp128
  store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatunsitf

  %src64 = load i64* @var64
  %val64 = uitofp i64 %src64 to fp128
  store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatunditf

  ret void
}

define i1 @test_setcc1() {
; CHECK-LABEL: test_setcc1:

  %lhs = load fp128* @lhs, align 16
  %rhs = load fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]

; Technically, everything after the call to __letf2 is redundant, but we'll let
; LLVM have its fun for now.
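; (__letf2 follows the soft-float comparison convention: it returns a value
; less than or equal to zero iff lhs <= rhs, and a positive value otherwise,
; including the unordered case, so "cmp; cset le" on its result reproduces
; the ole predicate.)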
  %val = fcmp ole fp128 %lhs, %rhs
; CHECK: bl __letf2
; CHECK: cmp w0, #0
; CHECK: cset w0, le

  ret i1 %val
; CHECK: ret
}

define i1 @test_setcc2() {
; CHECK-LABEL: test_setcc2:

  %lhs = load fp128* @lhs, align 16
  %rhs = load fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]

  %val = fcmp ugt fp128 %lhs, %rhs
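; ugt is "unordered or greater than", so it is expanded into two libcalls:
; __gttf2 > 0 covers the ordered greater-than case, __unordtf2 != 0 covers
; NaN operands, and the two results are ORed together.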
; CHECK: bl      __gttf2
; CHECK: cmp     w0, #0
; CHECK: cset   [[GT:w[0-9]+]], gt

; CHECK: bl      __unordtf2
; CHECK: cmp     w0, #0
; CHECK: cset   [[UNORDERED:w[0-9]+]], ne
; CHECK: orr     w0, [[UNORDERED]], [[GT]]

  ret i1 %val
; CHECK: ret
}

define i32 @test_br_cc() {
; CHECK-LABEL: test_br_cc:

  %lhs = load fp128* @lhs, align 16
  %rhs = load fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]

  ; olt == !uge, which is what LLVM unfortunately "optimizes" this comparison into.
  %cond = fcmp olt fp128 %lhs, %rhs
; CHECK: bl      __getf2
; CHECK: cmp     w0, #0
; CHECK: cset   [[OGE:w[0-9]+]], ge

; CHECK: bl      __unordtf2
; CHECK: cmp     w0, #0
; CHECK: cset   [[UNORDERED:w[0-9]+]], ne

; CHECK: orr     [[UGE:w[0-9]+]], [[UNORDERED]], [[OGE]]
; CHECK: cbnz [[UGE]], [[RET29:.LBB[0-9]+_[0-9]+]]
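; The branch itself is emitted on the inverted (uge) condition: when lhs >= rhs
; or the operands are unordered, cbnz jumps straight to the %iffalse block.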
  br i1 %cond, label %iftrue, label %iffalse

iftrue:
  ret i32 42
; CHECK-NEXT: BB#
; CHECK-NEXT: movz w0, #0x2a
; CHECK-NEXT: b [[REALRET:.LBB[0-9]+_[0-9]+]]

iffalse:
  ret i32 29
; CHECK: [[RET29]]:
; CHECK-NEXT: movz w0, #0x1d
; CHECK-NEXT: [[REALRET]]:
; CHECK: ret
}

define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
; CHECK-LABEL: test_select:

  %val = select i1 %cond, fp128 %lhs, fp128 %rhs
  store fp128 %val, fp128* @lhs, align 16
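; There is no conditional-select instruction for 128-bit FP values, so the
; select is lowered to a small branch diamond that conditionally copies the
; whole 16-byte vector register.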
; CHECK: tst w0, #0x1
; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: BB#
; CHECK-NEXT: mov v[[VAL:[0-9]+]].16b, v0.16b
; CHECK-NEXT: [[IFFALSE]]:
; CHECK: str q[[VAL]], [{{x[0-9]+}}, :lo12:lhs]
  ret void
; CHECK: ret
}

@varfloat = global float 0.0, align 4
@vardouble = global double 0.0, align 8
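
; Truncation to and extension from the smaller FP types also go through
; libcalls: __trunctfsf2/__trunctfdf2 narrow fp128 to float/double, and
; __extendsftf2/__extenddftf2 widen float/double back to fp128.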

define void @test_round() {
; CHECK-LABEL: test_round:

  %val = load fp128* @lhs, align 16

  %float = fptrunc fp128 %val to float
  store float %float, float* @varfloat, align 4
; CHECK: bl __trunctfsf2
; CHECK: str s0, [{{x[0-9]+}}, :lo12:varfloat]

  %double = fptrunc fp128 %val to double
  store double %double, double* @vardouble, align 8
; CHECK: bl __trunctfdf2
; CHECK: str d0, [{{x[0-9]+}}, :lo12:vardouble]

  ret void
}

define void @test_extend() {
; CHECK-LABEL: test_extend:

  %val = load fp128* @lhs, align 16

  %float = load float* @varfloat
  %fromfloat = fpext float %float to fp128
  store volatile fp128 %fromfloat, fp128* @lhs, align 16
; CHECK: bl __extendsftf2
; CHECK: str q0, [{{x[0-9]+}}, :lo12:lhs]

  %double = load double* @vardouble
  %fromdouble = fpext double %double to fp128
  store volatile fp128 %fromdouble, fp128* @lhs, align 16
; CHECK: bl __extenddftf2
; CHECK: str q0, [{{x[0-9]+}}, :lo12:lhs]

  ret void
; CHECK: ret
}

define fp128 @test_neg(fp128 %in) {
; CHECK: [[MINUS0:.LCPI[0-9]+_0]]:
; Make sure the weird hex constant below *is* -0.0
; CHECK-NEXT: fp128 -0

; CHECK-LABEL: test_neg:

  ; This could in principle be optimized to an fneg, which we can't select;
  ; make sure that doesn't happen.
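  ; Instead the negation is done as (-0.0 - %in): -0.0 is loaded from the
  ; constant pool entry checked above, and the incoming value has to be moved
  ; from q0 to q1 first so the operands end up in the right registers for the
  ; __subtf3 call.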
  %ret = fsub fp128 0xL00000000000000008000000000000000, %in
; CHECK: mov v1.16b, v0.16b
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:[[MINUS0]]]
; CHECK: bl __subtf3

  ret fp128 %ret
; CHECK: ret
}