-rw-r--r--  generic/lib/shared/vload_impl.ll    50
-rw-r--r--  generic/lib/shared/vstore_impl.ll   41
-rw-r--r--  r600/lib/SOURCES                     2
-rw-r--r--  r600/lib/shared/vload.cl            92
-rw-r--r--  r600/lib/shared/vstore.cl          104
5 files changed, 233 insertions, 56 deletions
diff --git a/generic/lib/shared/vload_impl.ll b/generic/lib/shared/vload_impl.ll
index ae719e0..2e70e5f 100644
--- a/generic/lib/shared/vload_impl.ll
+++ b/generic/lib/shared/vload_impl.ll
@@ -1,43 +1,33 @@
; This provides optimized implementations of vload2/3/4/8/16 for 32-bit int/uint
-define <2 x i32> @__clc_vload2_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
- %4 = load <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <2 x i32> %4
+define <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*
+ %2 = load <2 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <2 x i32> %2
}
-define <3 x i32> @__clc_vload3_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
- %4 = load <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <3 x i32> %4
+define <3 x i32> @__clc_vload3_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <3 x i32> addrspace(1)*
+ %2 = load <3 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <3 x i32> %2
}
-define <4 x i32> @__clc_vload4_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
- %4 = load <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <4 x i32> %4
+define <4 x i32> @__clc_vload4_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <4 x i32> addrspace(1)*
+ %2 = load <4 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <4 x i32> %2
}
-define <8 x i32> @__clc_vload8_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
- %4 = load <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <8 x i32> %4
+define <8 x i32> @__clc_vload8_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <8 x i32> addrspace(1)*
+ %2 = load <8 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <8 x i32> %2
}
-define <16 x i32> @__clc_vload16_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
- %4 = load <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <16 x i32> %4
+define <16 x i32> @__clc_vload16_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <16 x i32> addrspace(1)*
+ %2 = load <16 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <16 x i32> %2
}
!1 = metadata !{metadata !"char", metadata !5}
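The helpers above no longer take a byte offset: the OpenCL-side wrapper does the indexing in C and passes a pointer already advanced to the first element. A minimal sketch of that call site, following the macro expansion in r600/lib/shared/vload.cl later in this commit (signed case shown; the macro's int-to-int __builtin_astype is a no-op and is omitted here):

    _CLC_DECL int4 __clc_vload4_i32__addr1(const __global int *);

    _CLC_OVERLOAD _CLC_DEF int4 vload4(size_t offset, const __global int *x) {
      /* Indexing happens here; the IR helper only bitcasts and loads. */
      return __clc_vload4_i32__addr1(&x[4 * offset]);
    }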
diff --git a/generic/lib/shared/vstore_impl.ll b/generic/lib/shared/vstore_impl.ll
index 3baab5e..388bce2 100644
--- a/generic/lib/shared/vstore_impl.ll
+++ b/generic/lib/shared/vstore_impl.ll
@@ -1,46 +1,35 @@
; This provides optimized implementations of vstore2/3/4/8/16 for 32-bit int/uint
-define void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
- store <2 x i32> %vec, <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore2_i32__addr1(<2 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*
+ store <2 x i32> %vec, <2 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-define void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
- store <3 x i32> %vec, <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore3_i32__addr1(<3 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <3 x i32> addrspace(1)*
+ store <3 x i32> %vec, <3 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-define void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
- store <4 x i32> %vec, <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore4_i32__addr1(<4 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <4 x i32> addrspace(1)*
+ store <4 x i32> %vec, <4 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-define void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
- store <8 x i32> %vec, <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore8_i32__addr1(<8 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <8 x i32> addrspace(1)*
+ store <8 x i32> %vec, <8 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-define void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
- store <16 x i32> %vec, <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
+define void @__clc_vstore16_i32__addr1(<16 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <16 x i32> addrspace(1)*
+ store <16 x i32> %vec, <16 x i32> addrspace(1)* %1, align 4, !tbaa !3
ret void
}
-
!1 = metadata !{metadata !"char", metadata !5}
!2 = metadata !{metadata !"short", metadata !5}
!3 = metadata !{metadata !"int", metadata !5}
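As with the loads, the store helpers now receive a destination pointer already advanced to element N*offset. A matching wrapper sketch, following the pattern in r600/lib/shared/vstore.cl below:

    _CLC_DECL void __clc_vstore4_i32__addr1(int4, __global int *);

    _CLC_OVERLOAD _CLC_DEF void vstore4(int4 vec, size_t offset, __global int *mem) {
      /* The wrapper computes the element address; the IR helper
         bitcasts the pointer and performs one vector store. */
      __clc_vstore4_i32__addr1(vec, &mem[4 * offset]);
    }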
diff --git a/r600/lib/SOURCES b/r600/lib/SOURCES
index 16ef3ac..87df0b7 100644
--- a/r600/lib/SOURCES
+++ b/r600/lib/SOURCES
@@ -4,3 +4,5 @@ workitem/get_local_id.ll
workitem/get_global_size.ll
synchronization/barrier.cl
synchronization/barrier_impl.ll
+shared/vload.cl
+shared/vstore.cl
\ No newline at end of file
diff --git a/r600/lib/shared/vload.cl b/r600/lib/shared/vload.cl
new file mode 100644
index 0000000..6144dde
--- /dev/null
+++ b/r600/lib/shared/vload.cl
@@ -0,0 +1,92 @@
+#include <clc/clc.h>
+
+#define VLOAD_VECTORIZE(PRIM_TYPE, ADDR_SPACE) \
+ _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##2 vload2(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
+ return (PRIM_TYPE##2)(x[2*offset] , x[2*offset+1]); \
+ } \
+\
+ _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##3 vload3(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
+ return (PRIM_TYPE##3)(x[3*offset] , x[3*offset+1], x[3*offset+2]); \
+ } \
+\
+ _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##4 vload4(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
+ return (PRIM_TYPE##4)(x[4*offset], x[4*offset+1], x[4*offset+2], x[4*offset+3]); \
+ } \
+\
+ _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##8 vload8(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
+ return (PRIM_TYPE##8)(vload4(0, &x[8*offset]), vload4(1, &x[8*offset])); \
+ } \
+\
+ _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##16 vload16(size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
+ return (PRIM_TYPE##16)(vload8(0, &x[16*offset]), vload8(1, &x[16*offset])); \
+ } \
+
+#define VLOAD_ADDR_SPACES(SCALAR_GENTYPE) \
+ VLOAD_VECTORIZE(SCALAR_GENTYPE, __private) \
+ VLOAD_VECTORIZE(SCALAR_GENTYPE, __local) \
+ VLOAD_VECTORIZE(SCALAR_GENTYPE, __constant) \
+ VLOAD_VECTORIZE(SCALAR_GENTYPE, __global) \
+
+//int/uint are special... see below
+#define VLOAD_TYPES() \
+ VLOAD_ADDR_SPACES(char) \
+ VLOAD_ADDR_SPACES(uchar) \
+ VLOAD_ADDR_SPACES(short) \
+ VLOAD_ADDR_SPACES(ushort) \
+ VLOAD_ADDR_SPACES(long) \
+ VLOAD_ADDR_SPACES(ulong) \
+ VLOAD_ADDR_SPACES(float) \
+
+VLOAD_TYPES()
+
+#ifdef cl_khr_fp64
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
+ VLOAD_ADDR_SPACES(double)
+#endif
+
+//Assembly overrides start here
+
+VLOAD_VECTORIZE(int, __private)
+VLOAD_VECTORIZE(int, __local)
+VLOAD_VECTORIZE(int, __constant)
+VLOAD_VECTORIZE(uint, __private)
+VLOAD_VECTORIZE(uint, __local)
+VLOAD_VECTORIZE(uint, __constant)
+
+_CLC_OVERLOAD _CLC_DEF int3 vload3(size_t offset, const global int *x) {
+ return (int3)(vload2(0, &x[3*offset]), x[3*offset+2]);
+}
+_CLC_OVERLOAD _CLC_DEF uint3 vload3(size_t offset, const global uint *x) {
+ return (uint3)(vload2(0, &x[3*offset]), x[3*offset+2]);
+}
+
+// We only define functions for the signed intN vloadN(), and then just bitcast the result for unsigned types (expansion sketch follows this diff)
+#define _CLC_VLOAD_ASM_DECL(PRIM_TYPE,LLVM_SCALAR_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \
+_CLC_DECL PRIM_TYPE##2 __clc_vload2_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (const ADDR_SPACE PRIM_TYPE *); \
+_CLC_DECL PRIM_TYPE##4 __clc_vload4_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (const ADDR_SPACE PRIM_TYPE *); \
+_CLC_DECL PRIM_TYPE##8 __clc_vload8_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (const ADDR_SPACE PRIM_TYPE *); \
+_CLC_DECL PRIM_TYPE##16 __clc_vload16_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (const ADDR_SPACE PRIM_TYPE *); \
+
+#define _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE,S_PRIM_TYPE, LLVM_SCALAR_TYPE,VEC_WIDTH,ADDR_SPACE,ADDR_SPACE_ID) \
+ _CLC_OVERLOAD _CLC_DEF PRIM_TYPE##VEC_WIDTH vload##VEC_WIDTH (size_t offset, const ADDR_SPACE PRIM_TYPE *x) { \
+ return __builtin_astype(__clc_vload##VEC_WIDTH##_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID ((const ADDR_SPACE S_PRIM_TYPE *)&x[VEC_WIDTH * offset]), PRIM_TYPE##VEC_WIDTH); \
+ } \
+
+/* Note: the R600 back-end doesn't support load <3 x ?>, so those
+ * functions aren't actually overridden here.
+ */
+#define _CLC_VLOAD_ASM_OVERLOAD_SIZES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \
+ _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 2, ADDR_SPACE, ADDR_SPACE_ID) \
+ _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 4, ADDR_SPACE, ADDR_SPACE_ID) \
+ _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 8, ADDR_SPACE, ADDR_SPACE_ID) \
+ _CLC_VLOAD_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 16, ADDR_SPACE, ADDR_SPACE_ID) \
+
+#define _CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE) \
+ _CLC_VLOAD_ASM_OVERLOAD_SIZES(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, global, 1) \
+
+#define _CLC_VLOAD_ASM_OVERLOADS() \
+ _CLC_VLOAD_ASM_DECL(int,i32,__global,1) \
+ _CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(int,int,i32) \
+ _CLC_VLOAD_ASM_OVERLOAD_ADDR_SPACES(uint,int,i32) \
+
+_CLC_VLOAD_ASM_OVERLOADS()
\ No newline at end of file
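For reference, a hand expansion of one instance of the override macro above, _CLC_VLOAD_ASM_DEFINE(uint, int, i32, 4, global, 1), showing how an unsigned overload reuses the signed IR helper (illustrative sketch, not compiler output):

    _CLC_OVERLOAD _CLC_DEF uint4 vload4(size_t offset, const global uint *x) {
      /* Reinterpret the pointer as int, call the int-typed IR helper,
         then bitcast the loaded int4 back to uint4. */
      return __builtin_astype(
          __clc_vload4_i32__addr1((const global int *)&x[4 * offset]), uint4);
    }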
diff --git a/r600/lib/shared/vstore.cl b/r600/lib/shared/vstore.cl
new file mode 100644
index 0000000..a150849
--- /dev/null
+++ b/r600/lib/shared/vstore.cl
@@ -0,0 +1,104 @@
+#include <clc/clc.h>
+
+#pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable
+
+#define VSTORE_VECTORIZE(PRIM_TYPE, ADDR_SPACE) \
+ _CLC_OVERLOAD _CLC_DEF void vstore2(PRIM_TYPE##2 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
+ mem[2*offset] = vec.s0; \
+ mem[2*offset+1] = vec.s1; \
+ } \
+\
+ _CLC_OVERLOAD _CLC_DEF void vstore3(PRIM_TYPE##3 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
+ mem[3*offset] = vec.s0; \
+ mem[3*offset+1] = vec.s1; \
+ mem[3*offset+2] = vec.s2; \
+ } \
+\
+ _CLC_OVERLOAD _CLC_DEF void vstore4(PRIM_TYPE##4 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
+ vstore2(vec.lo, 0, &mem[offset*4]); \
+ vstore2(vec.hi, 1, &mem[offset*4]); \
+ } \
+\
+ _CLC_OVERLOAD _CLC_DEF void vstore8(PRIM_TYPE##8 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
+ vstore4(vec.lo, 0, &mem[offset*8]); \
+ vstore4(vec.hi, 1, &mem[offset*8]); \
+ } \
+\
+ _CLC_OVERLOAD _CLC_DEF void vstore16(PRIM_TYPE##16 vec, size_t offset, ADDR_SPACE PRIM_TYPE *mem) { \
+ vstore8(vec.lo, 0, &mem[offset*16]); \
+ vstore8(vec.hi, 1, &mem[offset*16]); \
+ } \
+
+#define VSTORE_ADDR_SPACES(SCALAR_GENTYPE) \
+ VSTORE_VECTORIZE(SCALAR_GENTYPE, __private) \
+ VSTORE_VECTORIZE(SCALAR_GENTYPE, __local) \
+ VSTORE_VECTORIZE(SCALAR_GENTYPE, __global) \
+
+//int/uint are special... see below
+#define VSTORE_TYPES() \
+ VSTORE_ADDR_SPACES(char) \
+ VSTORE_ADDR_SPACES(uchar) \
+ VSTORE_ADDR_SPACES(short) \
+ VSTORE_ADDR_SPACES(ushort) \
+ VSTORE_ADDR_SPACES(long) \
+ VSTORE_ADDR_SPACES(ulong) \
+ VSTORE_ADDR_SPACES(float) \
+
+VSTORE_TYPES()
+
+#ifdef cl_khr_fp64
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
+ VSTORE_ADDR_SPACES(double)
+#endif
+
+VSTORE_VECTORIZE(int, __private)
+VSTORE_VECTORIZE(int, __local)
+VSTORE_VECTORIZE(uint, __private)
+VSTORE_VECTORIZE(uint, __local)
+
+_CLC_OVERLOAD _CLC_DEF void vstore3(int3 vec, size_t offset, global int *mem) {
+ mem[3*offset] = vec.s0;
+ mem[3*offset+1] = vec.s1;
+ mem[3*offset+2] = vec.s2;
+}
+_CLC_OVERLOAD _CLC_DEF void vstore3(uint3 vec, size_t offset, global uint *mem) {
+ mem[3*offset] = vec.s0;
+ mem[3*offset+1] = vec.s1;
+ mem[3*offset+2] = vec.s2;
+}
+
+/* Note: R600 doesn't support store <3 x ?>, so those functions aren't
+ * overridden here; the scalar stores above are the lowest common denominator.
+ */
+
+// We only define functions for the signed intN vstoreN(), and then just cast the pointers/vectors for unsigned types (expansion sketch follows this diff)
+#define _CLC_VSTORE_ASM_DECL(PRIM_TYPE,LLVM_SCALAR_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \
+_CLC_DECL void __clc_vstore2_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (PRIM_TYPE##2, ADDR_SPACE PRIM_TYPE *); \
+_CLC_DECL void __clc_vstore4_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (PRIM_TYPE##4, ADDR_SPACE PRIM_TYPE *); \
+_CLC_DECL void __clc_vstore8_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (PRIM_TYPE##8, ADDR_SPACE PRIM_TYPE *); \
+_CLC_DECL void __clc_vstore16_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (PRIM_TYPE##16, ADDR_SPACE PRIM_TYPE *); \
+
+#define _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_SCALAR_TYPE, VEC_WIDTH, ADDR_SPACE, ADDR_SPACE_ID) \
+ _CLC_OVERLOAD _CLC_DEF void vstore##VEC_WIDTH(PRIM_TYPE##VEC_WIDTH vec, size_t offset, ADDR_SPACE PRIM_TYPE *x) { \
+ __clc_vstore##VEC_WIDTH##_##LLVM_SCALAR_TYPE##__addr##ADDR_SPACE_ID (__builtin_astype(vec, S_PRIM_TYPE##VEC_WIDTH), (ADDR_SPACE S_PRIM_TYPE *)&x[VEC_WIDTH * offset]); \
+ } \
+
+/* Note: the R600 back-end doesn't support store <3 x ?>, so those functions
+ * aren't actually overridden here. When the back-end gains support, add the
+ * overrides here and remove the vstore3 definitions above.
+ */
+#define _CLC_VSTORE_ASM_OVERLOAD_SIZES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE,ADDR_SPACE,ADDR_SPACE_ID) \
+ _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 2, ADDR_SPACE, ADDR_SPACE_ID) \
+ _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 4, ADDR_SPACE, ADDR_SPACE_ID) \
+ _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 8, ADDR_SPACE, ADDR_SPACE_ID) \
+ _CLC_VSTORE_ASM_DEFINE(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, 16, ADDR_SPACE, ADDR_SPACE_ID) \
+
+#define _CLC_VSTORE_ASM_OVERLOAD_ADDR_SPACES(PRIM_TYPE,S_PRIM_TYPE,LLVM_TYPE) \
+ _CLC_VSTORE_ASM_OVERLOAD_SIZES(PRIM_TYPE, S_PRIM_TYPE, LLVM_TYPE, global, 1) \
+
+#define _CLC_VSTORE_ASM_OVERLOADS() \
+ _CLC_VSTORE_ASM_DECL(int,i32,__global,1) \
+ _CLC_VSTORE_ASM_OVERLOAD_ADDR_SPACES(int,int,i32) \
+ _CLC_VSTORE_ASM_OVERLOAD_ADDR_SPACES(uint,int,i32) \
+
+_CLC_VSTORE_ASM_OVERLOADS()
\ No newline at end of file
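And the corresponding store expansion, _CLC_VSTORE_ASM_DEFINE(uint, int, i32, 4, global, 1), again hand-expanded for illustration:

    _CLC_OVERLOAD _CLC_DEF void vstore4(uint4 vec, size_t offset, global uint *x) {
      /* Reinterpret the vector and the pointer to the signed type, then
         hand off to the single int-typed IR helper. */
      __clc_vstore4_i32__addr1(__builtin_astype(vec, int4),
                               (global int *)&x[4 * offset]);
    }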