author	Aaron Watry <awatry@gmail.com>	2013-07-16 14:29:01 +0000
committer	Aaron Watry <awatry@gmail.com>	2013-07-16 14:29:01 +0000
commit	2b80a46a5b9b0836e7a8cf4e6fbd85c332302398 (patch)
tree	fd149beaf7da04c2ad5772310781588a53a94733 /generic
parent	cfdac80e2cb66d091cf0b70cd0a0c1f258d14005 (diff)
Fix and re-enable R600 vload/vstore assembly
The assembly optimizations were making unsafe assumptions about which address spaces had which identifiers. Also, fix vload/vstore with 64-bit pointers, which was previously broken on Radeon SI.

This version still only provides assembly versions of int/uint 2/4/8/16 global loads and stores on R600, but it does so in a way that is easily extended to private/local/constant and could also be handled easily on other architectures.

v2:
1) Leave v[load|store]_impl.ll in generic/lib
2) Remove the vload_if.ll and vstore_if.ll interfaces
3) Fix address+offset calculations
4) Remove the offset from the assembly argument list

git-svn-id: https://llvm.org/svn/llvm-project/libclc/trunk@186416 91177308-0d34-0410-b5e6-96231b3b80d8
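For context, a minimal, hypothetical caller-side sketch of the new convention follows (it is not part of the diff below). With the offset argument removed from the helpers, the wrapper performs the address+offset arithmetic itself and hands the helper a fully formed global pointer. The caller name, the offset type, and the way the offset is scaled are illustrative assumptions, not code from this commit.

; Hypothetical caller sketch: only the adjusted pointer reaches the helper,
; so the helper no longer does any address or offset arithmetic of its own.
declare <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* nocapture) nounwind readonly

define <2 x i32> @example_vload2_global(i32 %n, i32 addrspace(1)* %base) nounwind {
  %elem = mul i32 %n, 2                                    ; vload2 advances 2 ints per offset step
  %ptr = getelementptr i32 addrspace(1)* %base, i32 %elem  ; address+offset computed by the caller
  %v = call <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* %ptr)
  ret <2 x i32> %v
}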
Diffstat (limited to 'generic')
-rw-r--r--	generic/lib/shared/vload_impl.ll	50
-rw-r--r--	generic/lib/shared/vstore_impl.ll	41
2 files changed, 35 insertions(+), 56 deletions(-)
diff --git a/generic/lib/shared/vload_impl.ll b/generic/lib/shared/vload_impl.ll
index ae719e0..2e70e5f 100644
--- a/generic/lib/shared/vload_impl.ll
+++ b/generic/lib/shared/vload_impl.ll
@@ -1,43 +1,33 @@
; This provides optimized implementations of vload4/8/16 for 32-bit int/uint
-define <2 x i32> @__clc_vload2_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
- %4 = load <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <2 x i32> %4
+define <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*
+ %2 = load <2 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <2 x i32> %2
}
-define <3 x i32> @__clc_vload3_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
- %4 = load <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <3 x i32> %4
+define <3 x i32> @__clc_vload3_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <3 x i32> addrspace(1)*
+ %2 = load <3 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <3 x i32> %2
}
-define <4 x i32> @__clc_vload4_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
- %4 = load <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <4 x i32> %4
+define <4 x i32> @__clc_vload4_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <4 x i32> addrspace(1)*
+ %2 = load <4 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <4 x i32> %2
}
-define <8 x i32> @__clc_vload8_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
- %4 = load <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <8 x i32> %4
+define <8 x i32> @__clc_vload8_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <8 x i32> addrspace(1)*
+ %2 = load <8 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <8 x i32> %2
}
-define <16 x i32> @__clc_vload16_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
- %4 = load <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <16 x i32> %4
+define <16 x i32> @__clc_vload16_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <16 x i32> addrspace(1)*
+ %2 = load <16 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <16 x i32> %2
}
!1 = metadata !{metadata !"char", metadata !5}
diff --git a/generic/lib/shared/vstore_impl.ll b/generic/lib/shared/vstore_impl.ll
index 3baab5e..388bce2 100644
--- a/generic/lib/shared/vstore_impl.ll
+++ b/generic/lib/shared/vstore_impl.ll
@@ -1,46 +1,35 @@
; This provides optimized implementations of vstore4/8/16 for 32-bit int/uint
-define void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
- store <2 x i32> %vec, <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore2_i32__addr1(<2 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*
+ store <2 x i32> %vec, <2 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-define void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
- store <3 x i32> %vec, <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore3_i32__addr1(<3 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <3 x i32> addrspace(1)*
+ store <3 x i32> %vec, <3 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-define void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
- store <4 x i32> %vec, <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore4_i32__addr1(<4 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <4 x i32> addrspace(1)*
+ store <4 x i32> %vec, <4 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-define void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
- store <8 x i32> %vec, <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore8_i32__addr1(<8 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <8 x i32> addrspace(1)*
+ store <8 x i32> %vec, <8 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-define void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
- store <16 x i32> %vec, <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore16_i32__addr1(<16 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <16 x i32> addrspace(1)*
+ store <16 x i32> %vec, <16 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-
!1 = metadata !{metadata !"char", metadata !5}
!2 = metadata !{metadata !"short", metadata !5}
!3 = metadata !{metadata !"int", metadata !5}
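As a hypothetical illustration of the extension path mentioned in the commit message, a variant for another address space would differ only in the numeric address space baked into the function name and the pointer type. The mapping of OpenCL address spaces (private/local/constant) to LLVM address-space numbers is target-specific, so the "3" below is an assumption for illustration, not something this commit defines.

; Hypothetical follow-on (not part of this commit): the same pattern for a
; different address space only changes the addrspace number in the name and
; in the pointer types.
define <2 x i32> @__clc_vload2_i32__addr3(i32 addrspace(3)* nocapture %addr) nounwind readonly alwaysinline {
  %1 = bitcast i32 addrspace(3)* %addr to <2 x i32> addrspace(3)*
  %2 = load <2 x i32> addrspace(3)* %1, align 4, !tbaa !3
  ret <2 x i32> %2
}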