diff --git a/llvm/lib/Target/R600/SIInstructions.td b/llvm/lib/Target/R600/SIInstructions.td
index 0f94164665c3..ffa45c55f3b6 100644
--- a/llvm/lib/Target/R600/SIInstructions.td
+++ b/llvm/lib/Target/R600/SIInstructions.td
@@ -176,19 +176,19 @@ defm V_CMPX_TRU_F32 : VOPC_32 <0x0000001f, "V_CMPX_TRU_F32">;
 } // End hasSideEffects = 1, Defs = [EXEC]

 defm V_CMP_F_F64 : VOPC_64 <0x00000020, "V_CMP_F_F64">;
-defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64">;
-defm V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64">;
-defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64">;
-defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64">;
+defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64", f64, COND_LT>;
+defm V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64", f64, COND_EQ>;
+defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64", f64, COND_LE>;
+defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64", f64, COND_GT>;
 defm V_CMP_LG_F64 : VOPC_64 <0x00000025, "V_CMP_LG_F64">;
-defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64">;
+defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64", f64, COND_GE>;
 defm V_CMP_O_F64 : VOPC_64 <0x00000027, "V_CMP_O_F64">;
 defm V_CMP_U_F64 : VOPC_64 <0x00000028, "V_CMP_U_F64">;
 defm V_CMP_NGE_F64 : VOPC_64 <0x00000029, "V_CMP_NGE_F64">;
 defm V_CMP_NLG_F64 : VOPC_64 <0x0000002a, "V_CMP_NLG_F64">;
 defm V_CMP_NGT_F64 : VOPC_64 <0x0000002b, "V_CMP_NGT_F64">;
 defm V_CMP_NLE_F64 : VOPC_64 <0x0000002c, "V_CMP_NLE_F64">;
-defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64">;
+defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64", f64, COND_NE>;
 defm V_CMP_NLT_F64 : VOPC_64 <0x0000002e, "V_CMP_NLT_F64">;
 defm V_CMP_TRU_F64 : VOPC_64 <0x0000002f, "V_CMP_TRU_F64">;

@@ -827,6 +827,18 @@ def : Pat <
   (V_CNDMASK_B32_e64 $src0, $src1, $src2)
 >;

+// Use two V_CNDMASK_B32_e64 instructions for f64
+def : Pat <
+  (f64 (select i1:$src2, f64:$src1, f64:$src0)),
+  (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+    (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub0),
+                       (EXTRACT_SUBREG $src1, sub0),
+                       $src2), sub0),
+    (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub1),
+                       (EXTRACT_SUBREG $src1, sub1),
+                       $src2), sub1)
+>;
+
 defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>;
 defm V_WRITELANE_B32 : VOP2_32 <0x00000002, "V_WRITELANE_B32", []>;

diff --git a/llvm/test/CodeGen/R600/fcmp64.ll b/llvm/test/CodeGen/R600/fcmp64.ll
new file mode 100644
index 000000000000..8f2513b405ad
--- /dev/null
+++ b/llvm/test/CodeGen/R600/fcmp64.ll
@@ -0,0 +1,79 @@
+; RUN: llc < %s -march=r600 -mcpu=tahiti | FileCheck %s
+
+; CHECK: @flt_f64
+; CHECK: V_CMP_LT_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
+
+define void @flt_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+                     double addrspace(1)* %in2) {
+   %r0 = load double addrspace(1)* %in1
+   %r1 = load double addrspace(1)* %in2
+   %r2 = fcmp ult double %r0, %r1
+   %r3 = select i1 %r2, double %r0, double %r1
+   store double %r3, double addrspace(1)* %out
+   ret void
+}
+
+; CHECK: @fle_f64
+; CHECK: V_CMP_LE_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
+
+define void @fle_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+                     double addrspace(1)* %in2) {
+   %r0 = load double addrspace(1)* %in1
+   %r1 = load double addrspace(1)* %in2
+   %r2 = fcmp ule double %r0, %r1
+   %r3 = select i1 %r2, double %r0, double %r1
+   store double %r3, double addrspace(1)* %out
+   ret void
+}
+
+; CHECK: @fgt_f64
+; CHECK: V_CMP_GT_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
+
+define void @fgt_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+                     double addrspace(1)* %in2) {
+   %r0 = load double addrspace(1)* %in1
+   %r1 = load double addrspace(1)* %in2
+   %r2 = fcmp ugt double %r0, %r1
+   %r3 = select i1 %r2, double %r0, double %r1
+   store double %r3, double addrspace(1)* %out
+   ret void
+}
+
+; CHECK: @fge_f64
+; CHECK: V_CMP_GE_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
+
+define void @fge_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+                     double addrspace(1)* %in2) {
+   %r0 = load double addrspace(1)* %in1
+   %r1 = load double addrspace(1)* %in2
+   %r2 = fcmp uge double %r0, %r1
+   %r3 = select i1 %r2, double %r0, double %r1
+   store double %r3, double addrspace(1)* %out
+   ret void
+}
+
+; CHECK: @fne_f64
+; CHECK: V_CMP_NEQ_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
+
+define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+                     double addrspace(1)* %in2) {
+   %r0 = load double addrspace(1)* %in1
+   %r1 = load double addrspace(1)* %in2
+   %r2 = fcmp une double %r0, %r1
+   %r3 = select i1 %r2, double %r0, double %r1
+   store double %r3, double addrspace(1)* %out
+   ret void
+}
+
+; CHECK: @feq_f64
+; CHECK: V_CMP_EQ_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
+
+define void @feq_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+                     double addrspace(1)* %in2) {
+   %r0 = load double addrspace(1)* %in1
+   %r1 = load double addrspace(1)* %in2
+   %r2 = fcmp ueq double %r0, %r1
+   %r3 = select i1 %r2, double %r0, double %r1
+   store double %r3, double addrspace(1)* %out
+   ret void
+}
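
Note on the select lowering: SI has no 64-bit conditional move, so the new TableGen pattern builds the f64 result from two V_CNDMASK_B32_e64 operations, one for the low (sub0) half and one for the high (sub1) half of the register pair, stitched together with INSERT_SUBREG. A minimal companion test could pin down that expansion in addition to the compare opcodes checked above; this is only a sketch, not part of the patch, and the @fsel_f64 name is hypothetical:

; Hypothetical companion test (illustrative, not in this patch); reuses the
; same RUN line as fcmp64.ll and checks that an f64 select expands into two
; 32-bit conditional masks, one per register half.
; RUN: llc < %s -march=r600 -mcpu=tahiti | FileCheck %s

; CHECK: @fsel_f64
; CHECK: V_CNDMASK_B32_e64
; CHECK: V_CNDMASK_B32_e64

define void @fsel_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) {
   %r0 = load double addrspace(1)* %in1
   %r1 = load double addrspace(1)* %in2
   %r2 = fcmp ult double %r0, %r1
   %r3 = select i1 %r2, double %r0, double %r1
   store double %r3, double addrspace(1)* %out
   ret void
}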