AMDGPU: Implement llvm.amdgcn.icmp/fcmp for i16/f16

Also support these on targets where i16/f16 are not legal types,
since that will allow us to freely create these intrinsics in
instcombine.
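
For reference, a minimal, illustrative IR sketch (not part of this
commit; the function names are hypothetical) of what instcombine can
now emit. The condition-code operand uses the ICmpInst/FCmpInst
predicate numbering exercised by the tests below (32 = eq, 1 = oeq):

  declare i64 @llvm.amdgcn.icmp.i16(i16, i16, i32)
  declare i64 @llvm.amdgcn.fcmp.f16(half, half, i32)

  define i64 @icmp_eq_i16(i16 %a, i16 %b) {
    ; eq; on targets without legal i16 the operands are zero- or
    ; sign-extended to i32 and a 32-bit compare is selected.
    %r = call i64 @llvm.amdgcn.icmp.i16(i16 %a, i16 %b, i32 32)
    ret i64 %r
  }

  define i64 @fcmp_oeq_f16(half %a, half %b) {
    ; oeq; without legal f16 the operands are extended to f32 first.
    %r = call i64 @llvm.amdgcn.fcmp.f16(half %a, half %b, i32 1)
    ret i64 %r
  }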

llvm-svn: 339819
commit b3a80e5397 (parent 08e082619a)
Matt Arsenault, 2018-08-15 21:25:20 +00:00
4 changed files with 427 additions and 42 deletions

lib/Target/AMDGPU/SIISelLowering.cpp

@@ -207,6 +207,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);
@@ -3664,6 +3666,69 @@ SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
  return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
}

static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
                                  SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  const auto *CD = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CD)
    return DAG.getUNDEF(VT);

  int CondCode = CD->getSExtValue();
  if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
      CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
    return DAG.getUNDEF(VT);

  ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);

  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  SDLoc DL(N);

  EVT CmpVT = LHS.getValueType();
  if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
    unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
      ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
    RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
  }

  ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
  return DAG.getNode(AMDGPUISD::SETCC, DL, VT, LHS, RHS,
                     DAG.getCondCode(CCOpcode));
}

static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
                                  SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  const auto *CD = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CD)
    return DAG.getUNDEF(VT);

  int CondCode = CD->getSExtValue();
  if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
      CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
    return DAG.getUNDEF(VT);
  }

  SDValue Src0 = N->getOperand(1);
  SDValue Src1 = N->getOperand(2);
  EVT CmpVT = Src0.getValueType();
  SDLoc SL(N);

  if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
    Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
    Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
  }

  FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
  ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
  return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src0,
                     Src1, DAG.getCondCode(CCOpcode));
}

void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
@@ -4950,34 +5015,10 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                       Denominator, Numerator);
  }
  case Intrinsic::amdgcn_icmp: {
-    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
-    if (!CD)
-      return DAG.getUNDEF(VT);
-    int CondCode = CD->getSExtValue();
-    if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
-        CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
-      return DAG.getUNDEF(VT);
-    ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
-    ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
-    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
-                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
+    return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
  }
  case Intrinsic::amdgcn_fcmp: {
-    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
-    if (!CD)
-      return DAG.getUNDEF(VT);
-    int CondCode = CD->getSExtValue();
-    if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
-        CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
-      return DAG.getUNDEF(VT);
-    FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
-    ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
-    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
-                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
+    return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
  }
  case Intrinsic::amdgcn_fmed3:
    return DAG.getNode(AMDGPUISD::FMED3, DL, VT,

lib/Target/AMDGPU/SIInstructions.td

@@ -635,6 +635,17 @@ def : ICMP_Pattern <COND_SGE, V_CMP_GE_I64_e64, i64>;
def : ICMP_Pattern <COND_SLT, V_CMP_LT_I64_e64, i64>;
def : ICMP_Pattern <COND_SLE, V_CMP_LE_I64_e64, i64>;
def : ICMP_Pattern <COND_EQ, V_CMP_EQ_U16_e64, i16>;
def : ICMP_Pattern <COND_NE, V_CMP_NE_U16_e64, i16>;
def : ICMP_Pattern <COND_UGT, V_CMP_GT_U16_e64, i16>;
def : ICMP_Pattern <COND_UGE, V_CMP_GE_U16_e64, i16>;
def : ICMP_Pattern <COND_ULT, V_CMP_LT_U16_e64, i16>;
def : ICMP_Pattern <COND_ULE, V_CMP_LE_U16_e64, i16>;
def : ICMP_Pattern <COND_SGT, V_CMP_GT_I16_e64, i16>;
def : ICMP_Pattern <COND_SGE, V_CMP_GE_I16_e64, i16>;
def : ICMP_Pattern <COND_SLT, V_CMP_LT_I16_e64, i16>;
def : ICMP_Pattern <COND_SLE, V_CMP_LE_I16_e64, i16>;

class FCMP_Pattern <PatLeaf cond, Instruction inst, ValueType vt> : GCNPat <
  (i64 (AMDGPUsetcc (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)),
                    (vt (VOP3Mods vt:$src1, i32:$src1_modifiers)), cond)),
@@ -656,6 +667,14 @@ def : FCMP_Pattern <COND_OGE, V_CMP_GE_F64_e64, f64>;
def : FCMP_Pattern <COND_OLT, V_CMP_LT_F64_e64, f64>;
def : FCMP_Pattern <COND_OLE, V_CMP_LE_F64_e64, f64>;
def : FCMP_Pattern <COND_OEQ, V_CMP_EQ_F16_e64, f16>;
def : FCMP_Pattern <COND_ONE, V_CMP_NEQ_F16_e64, f16>;
def : FCMP_Pattern <COND_OGT, V_CMP_GT_F16_e64, f16>;
def : FCMP_Pattern <COND_OGE, V_CMP_GE_F16_e64, f16>;
def : FCMP_Pattern <COND_OLT, V_CMP_LT_F16_e64, f16>;
def : FCMP_Pattern <COND_OLE, V_CMP_LE_F16_e64, f16>;
def : FCMP_Pattern <COND_UEQ, V_CMP_NLG_F32_e64, f32>;
def : FCMP_Pattern <COND_UNE, V_CMP_NEQ_F32_e64, f32>;
def : FCMP_Pattern <COND_UGT, V_CMP_NLE_F32_e64, f32>;
@@ -670,6 +689,13 @@ def : FCMP_Pattern <COND_UGE, V_CMP_NLT_F64_e64, f64>;
def : FCMP_Pattern <COND_ULT, V_CMP_NGE_F64_e64, f64>;
def : FCMP_Pattern <COND_ULE, V_CMP_NGT_F64_e64, f64>;
def : FCMP_Pattern <COND_UEQ, V_CMP_NLG_F16_e64, f16>;
def : FCMP_Pattern <COND_UNE, V_CMP_NEQ_F16_e64, f16>;
def : FCMP_Pattern <COND_UGT, V_CMP_NLE_F16_e64, f16>;
def : FCMP_Pattern <COND_UGE, V_CMP_NLT_F16_e64, f16>;
def : FCMP_Pattern <COND_ULT, V_CMP_NGE_F16_e64, f16>;
def : FCMP_Pattern <COND_ULE, V_CMP_NGT_F16_e64, f16>;
//===----------------------------------------------------------------------===//
// Target
//===----------------------------------------------------------------------===//

test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll

@@ -1,10 +1,13 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
declare i64 @llvm.amdgcn.fcmp.f32(float, float, i32) #0
declare i64 @llvm.amdgcn.fcmp.f64(double, double, i32) #0
declare float @llvm.fabs.f32(float) #0
declare i64 @llvm.amdgcn.fcmp.f16(half, half, i32) #0
declare half @llvm.fabs.f16(half) #0
; GCN-LABEL: {{^}}v_fcmp_f32_dynamic_cc:
; GCN: s_endpgm
define amdgpu_kernel void @v_fcmp_f32_dynamic_cc(i64 addrspace(1)* %out, float %src0, float %src1, i32 %cc) {
@@ -32,9 +35,9 @@ define amdgpu_kernel void @v_fcmp_f32_oeq_both_operands_with_fabs(i64 addrspace(
ret void
}
-; GCN-LABEL: {{^}}v_fcmp:
+; GCN-LABEL: {{^}}v_fcmp_f32:
; GCN-NOT: v_cmp_eq_f32_e64
-define amdgpu_kernel void @v_fcmp(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32(i64 addrspace(1)* %out, float %src) {
%result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 -1)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -233,4 +236,183 @@ define amdgpu_kernel void @v_fcmp_f64_ule(i64 addrspace(1)* %out, double %src) {
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_oeq_with_fabs:
; VI: v_cmp_eq_f16_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}}, |{{v[0-9]+}}|
; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], s{{[0-9]+}}
; SI: v_cvt_f32_f16_e64 [[CVT1:v[0-9]+]], |s{{[0-9]+}}|
; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT0]], [[CVT1]]
define amdgpu_kernel void @v_fcmp_f16_oeq_with_fabs(i64 addrspace(1)* %out, half %src, half %a) {
%temp = call half @llvm.fabs.f16(half %a)
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half %temp, i32 1)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_oeq_both_operands_with_fabs:
; VI: v_cmp_eq_f16_e64 {{s\[[0-9]+:[0-9]+\]}}, |{{s[0-9]+}}|, |{{v[0-9]+}}|
; SI: v_cvt_f32_f16_e64 [[CVT0:v[0-9]+]], |s{{[0-9]+}}|
; SI: v_cvt_f32_f16_e64 [[CVT1:v[0-9]+]], |s{{[0-9]+}}|
; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT0]], [[CVT1]]
define amdgpu_kernel void @v_fcmp_f16_oeq_both_operands_with_fabs(i64 addrspace(1)* %out, half %src, half %a) {
%temp = call half @llvm.fabs.f16(half %a)
%src_input = call half @llvm.fabs.f16(half %src)
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src_input, half %temp, i32 1)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16:
; GCN-NOT: v_cmp_eq_
define amdgpu_kernel void @v_fcmp_f16(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 -1)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_oeq:
; VI: v_cmp_eq_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_oeq(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 1)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_one:
; VI: v_cmp_neq_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_neq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_one(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 6)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_ogt:
; VI: v_cmp_gt_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_gt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_ogt(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 2)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_oge:
; VI: v_cmp_ge_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_ge_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_oge(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 3)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_olt:
; VI: v_cmp_lt_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_lt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_olt(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 4)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_ole:
; VI: v_cmp_le_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_le_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_ole(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 5)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_ueq:
; VI: v_cmp_nlg_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_nlg_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_ueq(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 9)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_une:
; VI: v_cmp_neq_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_neq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_une(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 14)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_ugt:
; VI: v_cmp_nle_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_nle_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_ugt(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 10)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_uge:
; VI: v_cmp_nlt_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_nlt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_uge(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 11)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_ult:
; VI: v_cmp_nge_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_nge_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_ult(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 12)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fcmp_f16_ule:
; VI: v_cmp_ngt_f16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_ngt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_fcmp_f16_ule(i64 addrspace(1)* %out, half %src) {
%result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 13)
store i64 %result, i64 addrspace(1)* %out
ret void
}
attributes #0 = { nounwind readnone convergent }

test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll

@@ -1,8 +1,9 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) #0
declare i64 @llvm.amdgcn.icmp.i64(i64, i64, i32) #0
declare i64 @llvm.amdgcn.icmp.i16(i16, i16, i32) #0
; No crash on invalid input
; GCN-LABEL: {{^}}v_icmp_i32_dynamic_cc:
@@ -21,13 +22,14 @@ define amdgpu_kernel void @v_icmp_i32_eq(i64 addrspace(1)* %out, i32 %src) {
ret void
}
-; GCN-LABEL: {{^}}v_icmp:
+; GCN-LABEL: {{^}}v_icmp_i32:
; GCN-NOT: v_cmp_eq_u32_e64
-define amdgpu_kernel void @v_icmp(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 30)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i32_ne:
; GCN: v_cmp_ne_u32_e64
define amdgpu_kernel void @v_icmp_i32_ne(i64 addrspace(1)* %out, i32 %src) {
@@ -36,33 +38,33 @@ define amdgpu_kernel void @v_icmp_i32_ne(i64 addrspace(1)* %out, i32 %src) {
ret void
}
-; GCN-LABEL: {{^}}v_icmp_u32_ugt:
+; GCN-LABEL: {{^}}v_icmp_i32_ugt:
; GCN: v_cmp_gt_u32_e64
-define amdgpu_kernel void @v_icmp_u32_ugt(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ugt(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 34)
store i64 %result, i64 addrspace(1)* %out
ret void
}
-; GCN-LABEL: {{^}}v_icmp_u32_uge:
+; GCN-LABEL: {{^}}v_icmp_i32_uge:
; GCN: v_cmp_ge_u32_e64
-define amdgpu_kernel void @v_icmp_u32_uge(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_uge(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 35)
store i64 %result, i64 addrspace(1)* %out
ret void
}
-; GCN-LABEL: {{^}}v_icmp_u32_ult:
+; GCN-LABEL: {{^}}v_icmp_i32_ult:
; GCN: v_cmp_lt_u32_e64
-define amdgpu_kernel void @v_icmp_u32_ult(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ult(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 36)
store i64 %result, i64 addrspace(1)* %out
ret void
}
-; GCN-LABEL: {{^}}v_icmp_u32_ule:
+; GCN-LABEL: {{^}}v_icmp_i32_ule:
; GCN: v_cmp_le_u32_e64
-define amdgpu_kernel void @v_icmp_u32_ule(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ule(i64 addrspace(1)* %out, i32 %src) {
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 37)
store i64 %result, i64 addrspace(1)* %out
ret void
@@ -178,4 +180,138 @@ define amdgpu_kernel void @v_icmp_i64_sle(i64 addrspace(1)* %out, i64 %src) {
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_dynamic_cc:
; GCN: s_endpgm
define amdgpu_kernel void @v_icmp_i16_dynamic_cc(i64 addrspace(1)* %out, i16 %src, i32 %cc) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 %cc)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_eq:
; VI: v_cmp_eq_u16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
; SI: v_cmp_eq_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_eq(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 32)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16:
; GCN-NOT: v_cmp_eq_
define amdgpu_kernel void @v_icmp_i16(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 30)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_ne:
; VI: v_cmp_ne_u16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
; SI: v_cmp_ne_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_ne(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 33)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_ugt:
; VI: v_cmp_gt_u16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
; SI: v_cmp_gt_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_ugt(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 34)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_uge:
; VI: v_cmp_ge_u16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
; SI: v_cmp_ge_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_uge(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 35)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_ult:
; VI: v_cmp_lt_u16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
; SI: v_cmp_lt_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_ult(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 36)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_ule:
; VI: v_cmp_le_u16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
; SI: v_cmp_le_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_ule(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 37)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_sgt:
; VI: v_cmp_gt_i16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_gt_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_sgt(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 38)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_sge:
; VI: v_cmp_ge_i16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_ge_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_sge(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 39)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_slt:
; VI: v_cmp_lt_i16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_lt_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_slt(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 40)
store i64 %result, i64 addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_icmp_i16_sle:
; VI: v_cmp_le_i16_e64
; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
; SI: v_cmp_le_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
define amdgpu_kernel void @v_icmp_i16_sle(i64 addrspace(1)* %out, i16 %src) {
%result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 41)
store i64 %result, i64 addrspace(1)* %out
ret void
}
attributes #0 = { nounwind readnone convergent }