summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2013-03-28 13:01:28 +0100
committerTom Stellard <thomas.stellard@amd.com>2013-04-20 00:04:41 -0400
commitde909e67aef8d479b7ed622c3fcc3704e00b3cf4 (patch)
tree16301fb378e2a635ac5f4ac2c6596c346f145ea7 /lib
parente4a4ecc5835c4a6aa146652838bb01e5fcb750cf (diff)
R600/SI: remove register classes from the remaining patterns
Signed-off-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'lib')
-rw-r--r--lib/Target/R600/SIInstructions.td39
1 files changed, 19 insertions, 20 deletions
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 3dfab507137..a321ee1759b 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -799,8 +799,8 @@ def V_CNDMASK_B32_e64 : VOP3 <0x00000100, (outs VReg_32:$dst),
//f32 pattern for V_CNDMASK_B32_e64
def : Pat <
- (f32 (select (i1 SSrc_64:$src2), VSrc_32:$src1, VSrc_32:$src0)),
- (V_CNDMASK_B32_e64 VSrc_32:$src0, VSrc_32:$src1, SSrc_64:$src2)
+ (f32 (select i1:$src2, f32:$src1, f32:$src0)),
+ (V_CNDMASK_B32_e64 f32:$src0, f32:$src1, i1:$src2)
>;
defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>;
@@ -990,18 +990,18 @@ def V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>;
} // isCommutable = 1
def : Pat <
- (mul VSrc_32:$src0, VReg_32:$src1),
- (V_MUL_LO_I32 VSrc_32:$src0, VReg_32:$src1, (i32 0))
+ (mul i32:$src0, i32:$src1),
+ (V_MUL_LO_I32 i32:$src0, i32:$src1, (i32 0))
>;
def : Pat <
- (mulhu VSrc_32:$src0, VReg_32:$src1),
- (V_MUL_HI_U32 VSrc_32:$src0, VReg_32:$src1, (i32 0))
+ (mulhu i32:$src0, i32:$src1),
+ (V_MUL_HI_U32 i32:$src0, i32:$src1, (i32 0))
>;
def : Pat <
- (mulhs VSrc_32:$src0, VReg_32:$src1),
- (V_MUL_HI_I32 VSrc_32:$src0, VReg_32:$src1, (i32 0))
+ (mulhs i32:$src0, i32:$src1),
+ (V_MUL_HI_I32 i32:$src0, i32:$src1, (i32 0))
>;
def V_DIV_SCALE_F32 : VOP3_32 <0x0000016d, "V_DIV_SCALE_F32", []>;
@@ -1038,15 +1038,15 @@ def S_AND_B64 : SOP2_64 <0x0000000f, "S_AND_B64",
>;
def : Pat <
- (i1 (and SSrc_64:$src0, SSrc_64:$src1)),
- (S_AND_B64 SSrc_64:$src0, SSrc_64:$src1)
+ (i1 (and i1:$src0, i1:$src1)),
+ (S_AND_B64 i1:$src0, i1:$src1)
>;
def S_OR_B32 : SOP2_32 <0x00000010, "S_OR_B32", []>;
def S_OR_B64 : SOP2_64 <0x00000011, "S_OR_B64", []>;
def : Pat <
- (i1 (or SSrc_64:$src0, SSrc_64:$src1)),
- (S_OR_B64 SSrc_64:$src0, SSrc_64:$src1)
+ (i1 (or i1:$src0, i1:$src1)),
+ (S_OR_B64 i1:$src0, i1:$src1)
>;
def S_XOR_B32 : SOP2_32 <0x00000012, "S_XOR_B32", []>;
def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64", []>;
@@ -1184,8 +1184,8 @@ def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;
} // end IsCodeGenOnly, isPseudo
def : Pat<
- (int_AMDGPU_cndlt VReg_32:$src0, VReg_32:$src1, VReg_32:$src2),
- (V_CNDMASK_B32_e64 VReg_32:$src2, VReg_32:$src1, (V_CMP_GT_F32_e64 0, VReg_32:$src0))
+ (int_AMDGPU_cndlt f32:$src0, f32:$src1, f32:$src2),
+ (V_CNDMASK_B32_e64 f32:$src2, f32:$src1, (V_CMP_GT_F32_e64 0, f32:$src0))
>;
def : Pat <
@@ -1195,19 +1195,18 @@ def : Pat <
/* int_SI_vs_load_input */
def : Pat<
- (int_SI_vs_load_input SReg_128:$tlst, IMM12bit:$attr_offset,
- VReg_32:$buf_idx_vgpr),
+ (int_SI_vs_load_input v16i8:$tlst, IMM12bit:$attr_offset,
+ i32:$buf_idx_vgpr),
(BUFFER_LOAD_FORMAT_XYZW imm:$attr_offset, 0, 1, 0, 0, 0,
- VReg_32:$buf_idx_vgpr, SReg_128:$tlst,
- 0, 0, 0)
+ i32:$buf_idx_vgpr, v16i8:$tlst, 0, 0, 0)
>;
/* int_SI_export */
def : Pat <
(int_SI_export imm:$en, imm:$vm, imm:$done, imm:$tgt, imm:$compr,
- VReg_32:$src0,VReg_32:$src1, VReg_32:$src2, VReg_32:$src3),
+ f32:$src0, f32:$src1, f32:$src2, f32:$src3),
(EXP imm:$en, imm:$tgt, imm:$compr, imm:$done, imm:$vm,
- VReg_32:$src0, VReg_32:$src1, VReg_32:$src2, VReg_32:$src3)
+ f32:$src0, f32:$src1, f32:$src2, f32:$src3)
>;
/********** ======================= **********/