[AArch64] Avoid going through GPRs for across-vector instructions.

This adds a new node type for each across-vector-lanes intrinsic.
For instance, for addv, we have AArch64ISD::UADDV, such that:
  (v4i32 (uaddv ...))
is the same as
  (v4i32 (scalar_to_vector (i32 (int_aarch64_neon_uaddv ...))))
that is,
  (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (i32 (int_aarch64_neon_uaddv ...)), ssub))

In a combine, we transform all such across-vector-lanes intrinsics to:

  (i32 (extract_vector_elt (uaddv ...), 0))

This has one big advantage: by making the extract_vector_elt explicit, we
enable the existing patterns for lane-aware instructions to fire.
This lets us avoid needlessly going through the GPRs.  Consider:

    uint32x4_t test_mul(uint32x4_t a, uint32x4_t b) {
        return vmulq_n_u32(a, vaddvq_u32(b));
    }

We now generate:
    addv.4s  s1, v1
    mul.4s   v0, v0, v1[0]
instead of the previous:
    addv.4s  s1, v1
    fmov     w8, s1
    dup.4s   v1, w8
    mul.4s   v0, v1, v0
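
For reference, the C function above reaches the backend roughly as the
following IR (a sketch of clang's usual lowering; value names are
illustrative):

    define <4 x i32> @test_mul(<4 x i32> %a, <4 x i32> %b) {
      %sum = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %b)
      %ins = insertelement <4 x i32> undef, i32 %sum, i32 0
      %dup = shufflevector <4 x i32> %ins, <4 x i32> undef,
                           <4 x i32> zeroinitializer
      %mul = mul <4 x i32> %a, %dup
      ret <4 x i32> %mul
    }
    declare i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32>)

The combine rewrites the intrinsic to (extract_vector_elt (UADDV %b), 0),
so the indexed-MUL pattern can consume the splat of lane 0 directly.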

rdar://20044838

llvm-svn: 231840
Ahmed Bougacha 2015-03-10 20:45:38 +00:00
parent e6cdf34116
commit fab5892f8b
8 changed files with 613 additions and 126 deletions

lib/Target/AArch64/AArch64ISelLowering.cpp

@@ -815,6 +815,12 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::FCMGTz: return "AArch64ISD::FCMGTz";
case AArch64ISD::FCMLEz: return "AArch64ISD::FCMLEz";
case AArch64ISD::FCMLTz: return "AArch64ISD::FCMLTz";
case AArch64ISD::SADDV: return "AArch64ISD::SADDV";
case AArch64ISD::UADDV: return "AArch64ISD::UADDV";
case AArch64ISD::SMINV: return "AArch64ISD::SMINV";
case AArch64ISD::UMINV: return "AArch64ISD::UMINV";
case AArch64ISD::SMAXV: return "AArch64ISD::SMAXV";
case AArch64ISD::UMAXV: return "AArch64ISD::UMAXV";
case AArch64ISD::NOT: return "AArch64ISD::NOT";
case AArch64ISD::BIT: return "AArch64ISD::BIT";
case AArch64ISD::CBZ: return "AArch64ISD::CBZ";
@@ -7610,6 +7616,15 @@ static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
N->getOperand(0), N->getOperand(1), AndN.getOperand(0));
}
static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
                                           SelectionDAG &DAG) {
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
                     DAG.getNode(Opc, SDLoc(N),
                                 N->getOperand(1).getSimpleValueType(),
                                 N->getOperand(1)),
                     DAG.getConstant(0, MVT::i64));
}
static SDValue performIntrinsicCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
@@ -7622,6 +7637,18 @@ static SDValue performIntrinsicCombine(SDNode *N,
case Intrinsic::aarch64_neon_vcvtfxu2fp:
return tryCombineFixedPointConvert(N, DCI, DAG);
break;
case Intrinsic::aarch64_neon_saddv:
return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG);
case Intrinsic::aarch64_neon_uaddv:
return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG);
case Intrinsic::aarch64_neon_sminv:
return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG);
case Intrinsic::aarch64_neon_uminv:
return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG);
case Intrinsic::aarch64_neon_smaxv:
return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG);
case Intrinsic::aarch64_neon_umaxv:
return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG);
case Intrinsic::aarch64_neon_fmax:
return DAG.getNode(AArch64ISD::FMAX, SDLoc(N), N->getValueType(0),
N->getOperand(1), N->getOperand(2));
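
When no lane-aware instruction consumes the extract, the explicit
vector_extract patterns below still match, so plain scalar uses are
unaffected; e.g. (a minimal sketch, mirroring the existing tests):

    define i32 @test_scalar(<4 x i32> %b) {
      %sum = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %b)
      ret i32 %sum
    }

still selects addv.4s into an FPR followed by a single fmov to a GPR.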

lib/Target/AArch64/AArch64ISelLowering.h

@@ -141,6 +141,18 @@ enum {
FCMLEz,
FCMLTz,
// Vector across-lanes addition
// Only the lower result lane is defined.
SADDV,
UADDV,
// Vector across-lanes min/max
// Only the lower result lane is defined.
SMINV,
UMINV,
SMAXV,
UMAXV,
// Vector bitwise negation
NOT,

lib/Target/AArch64/AArch64InstrInfo.td

@@ -258,6 +258,13 @@ def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
def AArch64smull : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;
def AArch64saddv : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -3431,10 +3438,10 @@ defm FMAXNMP : SIMDPairwiseScalarSD<1, 0, 0b01100, "fmaxnmp">;
defm FMAXP : SIMDPairwiseScalarSD<1, 0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDPairwiseScalarSD<1, 1, 0b01100, "fminnmp">;
defm FMINP : SIMDPairwiseScalarSD<1, 1, 0b01111, "fminp">;
def : Pat<(i64 (int_aarch64_neon_saddv (v2i64 V128:$Rn))),
(ADDPv2i64p V128:$Rn)>;
def : Pat<(i64 (int_aarch64_neon_uaddv (v2i64 V128:$Rn))),
(ADDPv2i64p V128:$Rn)>;
def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
(INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
(INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
(FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
@@ -3787,122 +3794,144 @@ defm FMAXV : SIMDAcrossLanesS<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDAcrossLanesS<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV : SIMDAcrossLanesS<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc, Intrinsic intOp> {
// Patterns for across-vector intrinsics, that have a node equivalent, that
// returns a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
def : Pat<(v8i8 (opNode V64:$Rn)),
(INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
(INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
(INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
(INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;
// If none did, fallback to the explicit patterns, consuming the vector_extract.
def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
(i32 0)), (i64 0))),
(EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
bsub), ssub)>;
def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
(EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
bsub), ssub)>;
def : Pat<(i32 (vector_extract (insert_subvector undef,
(v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
(EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
(EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
hsub), ssub)>;
def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
(EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
ssub), ssub)>;
}
multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
SDPatternOperator opNode>
: SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it as smov already
// performed it
def : Pat<(i32 (sext_inreg (i32 (intOp (v8i8 V64:$Rn))), i8)),
(i32 (SMOVvi8to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
(i64 0)))>;
def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
(i32 (SMOVvi8to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
(i64 0)))>;
// If there is a sign extension after this intrinsic, consume it as smov already
// performed it
def : Pat<(i32 (sext_inreg (i32 (intOp (v16i8 V128:$Rn))), i8)),
(i32 (SMOVvi8to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
(i64 0)))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
(i32 (SMOVvi8to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
(i64 0)))>;
// If there is a sign extension after this intrinsic, consume it as smov already
// performed it
def : Pat<(i32 (sext_inreg (i32 (intOp (v4i16 V64:$Rn))), i16)),
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
(opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
(i32 (SMOVvi8to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
(i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
(opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
(i32 (SMOVvi8to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
(i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
(opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
(i32 (SMOVvi16to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
(i64 0)))>;
def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
def : Pat<(i32 (sext_inreg (i32 (vector_extract
(opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
(i32 (SMOVvi16to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
(i64 0)))>;
// If there is a sign extension after this intrinsic, consume it as smov already
// performed it
def : Pat<(i32 (sext_inreg (i32 (intOp (v8i16 V128:$Rn))), i16)),
(i32 (SMOVvi16to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
(i64 0)))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
(i32 (SMOVvi16to32
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
(i64 0)))>;
def : Pat<(i32 (intOp (v4i32 V128:$Rn))),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub),
ssub))>;
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
(i64 0)))>;
}
multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc, Intrinsic intOp> {
multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
SDPatternOperator opNode>
: SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (intOp (v8i8 V64:$Rn))), maski8_or_more)),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
ssub))>;
def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
ssub))>;
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (intOp (v16i8 V128:$Rn))), maski8_or_more)),
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
(opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
maski8_or_more)),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
ssub))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
ssub))>;
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (intOp (v4i16 V64:$Rn))), maski16_or_more)),
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
(opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
ssub))>;
def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
ssub))>;
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (intOp (v8i16 V128:$Rn))), maski16_or_more)),
def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
maski16_or_more)),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
ssub))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
ssub))>;
def : Pat<(i32 (intOp (v4i32 V128:$Rn))),
(i32 (EXTRACT_SUBREG
(INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
(!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub),
ssub))>;
}
defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", AArch64saddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
(ADDPv2i32 V64:$Rn, V64:$Rn)>;
defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
(ADDPv2i32 V64:$Rn, V64:$Rn)>;
defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
(SMAXPv2i32 V64:$Rn, V64:$Rn)>;
defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
(SMINPv2i32 V64:$Rn, V64:$Rn)>;
defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
(UMAXPv2i32 V64:$Rn, V64:$Rn)>;
defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
(UMINPv2i32 V64:$Rn, V64:$Rn)>;
multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
(i32 (SMOVvi16to32
@@ -3964,32 +3993,6 @@ def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
dsub))>;
}
defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", int_aarch64_neon_saddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(i32 (int_aarch64_neon_saddv (v2i32 V64:$Rn))),
(EXTRACT_SUBREG (ADDPv2i32 V64:$Rn, V64:$Rn), ssub)>;
defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", int_aarch64_neon_uaddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(i32 (int_aarch64_neon_uaddv (v2i32 V64:$Rn))),
(EXTRACT_SUBREG (ADDPv2i32 V64:$Rn, V64:$Rn), ssub)>;
defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", int_aarch64_neon_smaxv>;
def : Pat<(i32 (int_aarch64_neon_smaxv (v2i32 V64:$Rn))),
(EXTRACT_SUBREG (SMAXPv2i32 V64:$Rn, V64:$Rn), ssub)>;
defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", int_aarch64_neon_sminv>;
def : Pat<(i32 (int_aarch64_neon_sminv (v2i32 V64:$Rn))),
(EXTRACT_SUBREG (SMINPv2i32 V64:$Rn, V64:$Rn), ssub)>;
defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", int_aarch64_neon_umaxv>;
def : Pat<(i32 (int_aarch64_neon_umaxv (v2i32 V64:$Rn))),
(EXTRACT_SUBREG (UMAXPv2i32 V64:$Rn, V64:$Rn), ssub)>;
defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", int_aarch64_neon_uminv>;
def : Pat<(i32 (int_aarch64_neon_uminv (v2i32 V64:$Rn))),
(EXTRACT_SUBREG (UMINPv2i32 V64:$Rn, V64:$Rn), ssub)>;
defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;

test/CodeGen/AArch64/arm64-smaxv.ll

@@ -1,4 +1,4 @@
; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false < %s | FileCheck %s
define signext i8 @test_vmaxv_s8(<8 x i8> %a1) {
; CHECK: test_vmaxv_s8
@@ -65,6 +65,76 @@ entry:
ret i32 %vmaxv.i
}
define <8 x i8> @test_vmaxv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
; CHECK-LABEL: test_vmaxv_s8_used_by_laneop:
; CHECK: smaxv.8b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <8 x i8> %a1, i8 %1, i32 3
ret <8 x i8> %2
}
define <4 x i16> @test_vmaxv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
; CHECK-LABEL: test_vmaxv_s16_used_by_laneop:
; CHECK: smaxv.4h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <4 x i16> %a1, i16 %1, i32 3
ret <4 x i16> %2
}
define <2 x i32> @test_vmaxv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
; CHECK-LABEL: test_vmaxv_s32_used_by_laneop:
; CHECK: smaxp.2s v[[REGNUM:[0-9]+]], v1, v1
; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> %a2)
%1 = insertelement <2 x i32> %a1, i32 %0, i32 1
ret <2 x i32> %1
}
define <16 x i8> @test_vmaxvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: test_vmaxvq_s8_used_by_laneop:
; CHECK: smaxv.16b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <16 x i8> %a1, i8 %1, i32 3
ret <16 x i8> %2
}
define <8 x i16> @test_vmaxvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: test_vmaxvq_s16_used_by_laneop:
; CHECK: smaxv.8h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <8 x i16> %a1, i16 %1, i32 3
ret <8 x i16> %2
}
define <4 x i32> @test_vmaxvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_vmaxvq_s32_used_by_laneop:
; CHECK: smaxv.4s s[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a2)
%1 = insertelement <4 x i32> %a1, i32 %0, i32 3
ret <4 x i32> %1
}
declare i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8>)

test/CodeGen/AArch64/arm64-sminv.ll

@@ -1,4 +1,4 @@
; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false < %s | FileCheck %s
define signext i8 @test_vminv_s8(<8 x i8> %a1) {
; CHECK: test_vminv_s8
@@ -65,6 +65,76 @@ entry:
ret i32 %vminv.i
}
define <8 x i8> @test_vminv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
; CHECK-LABEL: test_vminv_s8_used_by_laneop:
; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <8 x i8> %a1, i8 %1, i32 3
ret <8 x i8> %2
}
define <4 x i16> @test_vminv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
; CHECK-LABEL: test_vminv_s16_used_by_laneop:
; CHECK: sminv.4h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <4 x i16> %a1, i16 %1, i32 3
ret <4 x i16> %2
}
define <2 x i32> @test_vminv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
; CHECK-LABEL: test_vminv_s32_used_by_laneop:
; CHECK: sminp.2s v[[REGNUM:[0-9]+]], v1, v1
; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a2)
%1 = insertelement <2 x i32> %a1, i32 %0, i32 1
ret <2 x i32> %1
}
define <16 x i8> @test_vminvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: test_vminvq_s8_used_by_laneop:
; CHECK: sminv.16b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <16 x i8> %a1, i8 %1, i32 3
ret <16 x i8> %2
}
define <8 x i16> @test_vminvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: test_vminvq_s16_used_by_laneop:
; CHECK: sminv.8h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <8 x i16> %a1, i16 %1, i32 3
ret <8 x i16> %2
}
define <4 x i32> @test_vminvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_vminvq_s32_used_by_laneop:
; CHECK: sminv.4s s[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a2)
%1 = insertelement <4 x i32> %a1, i32 %0, i32 3
ret <4 x i32> %1
}
declare i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)

test/CodeGen/AArch64/arm64-umaxv.ll

@@ -1,4 +1,4 @@
; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp {
; CHECK-LABEL: vmax_u8x8:
@@ -86,7 +86,79 @@ return:
ret i32 %retval.0
}
define <8 x i8> @test_vmaxv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
; CHECK-LABEL: test_vmaxv_u8_used_by_laneop:
; CHECK: umaxv.8b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <8 x i8> %a1, i8 %1, i32 3
ret <8 x i8> %2
}
define <4 x i16> @test_vmaxv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
; CHECK-LABEL: test_vmaxv_u16_used_by_laneop:
; CHECK: umaxv.4h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <4 x i16> %a1, i16 %1, i32 3
ret <4 x i16> %2
}
define <2 x i32> @test_vmaxv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
; CHECK-LABEL: test_vmaxv_u32_used_by_laneop:
; CHECK: umaxp.2s v[[REGNUM:[0-9]+]], v1, v1
; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32> %a2)
%1 = insertelement <2 x i32> %a1, i32 %0, i32 1
ret <2 x i32> %1
}
define <16 x i8> @test_vmaxvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: test_vmaxvq_u8_used_by_laneop:
; CHECK: umaxv.16b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <16 x i8> %a1, i8 %1, i32 3
ret <16 x i8> %2
}
define <8 x i16> @test_vmaxvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: test_vmaxvq_u16_used_by_laneop:
; CHECK: umaxv.8h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <8 x i16> %a1, i16 %1, i32 3
ret <8 x i16> %2
}
define <4 x i32> @test_vmaxvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_vmaxvq_u32_used_by_laneop:
; CHECK: umaxv.4s s[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a2)
%1 = insertelement <4 x i32> %a1, i32 %0, i32 3
ret <4 x i32> %1
}
declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>) nounwind readnone
declare i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16>) nounwind readnone
declare i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16>) nounwind readnone
declare i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8>) nounwind readnone
declare i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32>) nounwind readnone
declare i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32>) nounwind readnone

test/CodeGen/AArch64/arm64-uminv.ll

@@ -1,4 +1,4 @@
; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp {
; CHECK-LABEL: vmin_u8x8:
@@ -86,7 +86,78 @@ return:
ret i32 %retval.0
}
define <8 x i8> @test_vminv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
; CHECK-LABEL: test_vminv_u8_used_by_laneop:
; CHECK: uminv.8b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <8 x i8> %a1, i8 %1, i32 3
ret <8 x i8> %2
}
define <4 x i16> @test_vminv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
; CHECK-LABEL: test_vminv_u16_used_by_laneop:
; CHECK: uminv.4h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <4 x i16> %a1, i16 %1, i32 3
ret <4 x i16> %2
}
define <2 x i32> @test_vminv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
; CHECK-LABEL: test_vminv_u32_used_by_laneop:
; CHECK: uminp.2s v[[REGNUM:[0-9]+]], v1, v1
; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32> %a2)
%1 = insertelement <2 x i32> %a1, i32 %0, i32 1
ret <2 x i32> %1
}
define <16 x i8> @test_vminvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: test_vminvq_u8_used_by_laneop:
; CHECK: uminv.16b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <16 x i8> %a1, i8 %1, i32 3
ret <16 x i8> %2
}
define <8 x i16> @test_vminvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: test_vminvq_u16_used_by_laneop:
; CHECK: uminv.8h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <8 x i16> %a1, i16 %1, i32 3
ret <8 x i16> %2
}
define <4 x i32> @test_vminvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_vminvq_u32_used_by_laneop:
; CHECK: uminv.4s s[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a2)
%1 = insertelement <4 x i32> %a1, i32 %0, i32 3
ret <4 x i32> %1
}
declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>) nounwind readnone
declare i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16>) nounwind readnone
declare i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16>) nounwind readnone
declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>) nounwind readnone
declare i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32>) nounwind readnone
declare i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32>) nounwind readnone

test/CodeGen/AArch64/arm64-vaddv.ll

@@ -1,4 +1,4 @@
; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s
; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -asm-verbose=false -mcpu=cyclone | FileCheck %s
define signext i8 @test_vaddv_s8(<8 x i8> %a1) {
; CHECK-LABEL: test_vaddv_s8:
@@ -11,6 +11,18 @@ entry:
ret i8 %0
}
define <8 x i8> @test_vaddv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
; CHECK-LABEL: test_vaddv_s8_used_by_laneop:
; CHECK: addv.8b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <8 x i8> %a1, i8 %1, i32 3
ret <8 x i8> %2
}
define signext i16 @test_vaddv_s16(<4 x i16> %a1) {
; CHECK-LABEL: test_vaddv_s16:
; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
@@ -22,6 +34,18 @@ entry:
ret i16 %0
}
define <4 x i16> @test_vaddv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
; CHECK-LABEL: test_vaddv_s16_used_by_laneop:
; CHECK: addv.4h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <4 x i16> %a1, i16 %1, i32 3
ret <4 x i16> %2
}
define i32 @test_vaddv_s32(<2 x i32> %a1) {
; CHECK-LABEL: test_vaddv_s32:
; 2 x i32 is not supported by the ISA, thus, this is a special case
@@ -33,6 +57,17 @@ entry:
ret i32 %vaddv.i
}
define <2 x i32> @test_vaddv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
; CHECK-LABEL: test_vaddv_s32_used_by_laneop:
; CHECK: addp.2s v[[REGNUM:[0-9]+]], v1, v1
; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a2)
%1 = insertelement <2 x i32> %a1, i32 %0, i32 1
ret <2 x i32> %1
}
define i64 @test_vaddv_s64(<2 x i64> %a1) {
; CHECK-LABEL: test_vaddv_s64:
; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
@@ -43,6 +78,17 @@ entry:
ret i64 %vaddv.i
}
define <2 x i64> @test_vaddv_s64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_vaddv_s64_used_by_laneop:
; CHECK: addp.2d d[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.d v0[1], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %a2)
%1 = insertelement <2 x i64> %a1, i64 %0, i64 1
ret <2 x i64> %1
}
define zeroext i8 @test_vaddv_u8(<8 x i8> %a1) {
; CHECK-LABEL: test_vaddv_u8:
; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
@@ -54,6 +100,18 @@ entry:
ret i8 %0
}
define <8 x i8> @test_vaddv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
; CHECK-LABEL: test_vaddv_u8_used_by_laneop:
; CHECK: addv.8b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <8 x i8> %a1, i8 %1, i32 3
ret <8 x i8> %2
}
define i32 @test_vaddv_u8_masked(<8 x i8> %a1) {
; CHECK-LABEL: test_vaddv_u8_masked:
; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
@@ -76,6 +134,18 @@ entry:
ret i16 %0
}
define <4 x i16> @test_vaddv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
; CHECK-LABEL: test_vaddv_u16_used_by_laneop:
; CHECK: addv.4h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <4 x i16> %a1, i16 %1, i32 3
ret <4 x i16> %2
}
define i32 @test_vaddv_u16_masked(<4 x i16> %a1) {
; CHECK-LABEL: test_vaddv_u16_masked:
; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
@@ -98,6 +168,17 @@ entry:
ret i32 %vaddv.i
}
define <2 x i32> @test_vaddv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
; CHECK-LABEL: test_vaddv_u32_used_by_laneop:
; CHECK: addp.2s v[[REGNUM:[0-9]+]], v1, v1
; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> %a2)
%1 = insertelement <2 x i32> %a1, i32 %0, i32 1
ret <2 x i32> %1
}
define float @test_vaddv_f32(<2 x float> %a1) {
; CHECK-LABEL: test_vaddv_f32:
; CHECK: faddp.2s s0, v0
@@ -136,6 +217,17 @@ entry:
ret i64 %vaddv.i
}
define <2 x i64> @test_vaddv_u64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) {
; CHECK-LABEL: test_vaddv_u64_used_by_laneop:
; CHECK: addp.2d d[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.d v0[1], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a2)
%1 = insertelement <2 x i64> %a1, i64 %0, i64 1
ret <2 x i64> %1
}
define <1 x i64> @test_vaddv_u64_to_vec(<2 x i64> %a1) {
; CHECK-LABEL: test_vaddv_u64_to_vec:
; CHECK: addp.2d d0, v0
@@ -159,6 +251,18 @@ entry:
ret i8 %0
}
define <16 x i8> @test_vaddvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: test_vaddvq_s8_used_by_laneop:
; CHECK: addv.16b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <16 x i8> %a1, i8 %1, i32 3
ret <16 x i8> %2
}
define signext i16 @test_vaddvq_s16(<8 x i16> %a1) {
; CHECK-LABEL: test_vaddvq_s16:
; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
@@ -170,6 +274,18 @@ entry:
ret i16 %0
}
define <8 x i16> @test_vaddvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: test_vaddvq_s16_used_by_laneop:
; CHECK: addv.8h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <8 x i16> %a1, i16 %1, i32 3
ret <8 x i16> %2
}
define i32 @test_vaddvq_s32(<4 x i32> %a1) {
; CHECK-LABEL: test_vaddvq_s32:
; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
@@ -180,6 +296,17 @@ entry:
ret i32 %vaddv.i
}
define <4 x i32> @test_vaddvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_vaddvq_s32_used_by_laneop:
; CHECK: addv.4s s[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a2)
%1 = insertelement <4 x i32> %a1, i32 %0, i32 3
ret <4 x i32> %1
}
define zeroext i8 @test_vaddvq_u8(<16 x i8> %a1) {
; CHECK-LABEL: test_vaddvq_u8:
; CHECK: addv.16b b[[REGNUM:[0-9]+]], v0
@@ -191,6 +318,18 @@ entry:
ret i8 %0
}
define <16 x i8> @test_vaddvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: test_vaddvq_u8_used_by_laneop:
; CHECK: addv.16b b[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> %a2)
%1 = trunc i32 %0 to i8
%2 = insertelement <16 x i8> %a1, i8 %1, i32 3
ret <16 x i8> %2
}
define zeroext i16 @test_vaddvq_u16(<8 x i16> %a1) {
; CHECK-LABEL: test_vaddvq_u16:
; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
@@ -202,6 +341,18 @@ entry:
ret i16 %0
}
define <8 x i16> @test_vaddvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
; CHECK-LABEL: test_vaddvq_u16_used_by_laneop:
; CHECK: addv.8h h[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a2)
%1 = trunc i32 %0 to i16
%2 = insertelement <8 x i16> %a1, i16 %1, i32 3
ret <8 x i16> %2
}
define i32 @test_vaddvq_u32(<4 x i32> %a1) {
; CHECK-LABEL: test_vaddvq_u32:
; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
@@ -212,6 +363,17 @@ entry:
ret i32 %vaddv.i
}
define <4 x i32> @test_vaddvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
; CHECK-LABEL: test_vaddvq_u32_used_by_laneop:
; CHECK: addv.4s s[[REGNUM:[0-9]+]], v1
; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
; CHECK-NEXT: ret
entry:
%0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a2)
%1 = insertelement <4 x i32> %a1, i32 %0, i32 3
ret <4 x i32> %1
}
declare i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16>)