diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index cfcab55ce4ec..911f76ad45d2 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -582,23 +582,27 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MGATHER(MaskedGatherSDNode *N) { /// Promote the overflow flag of an overflowing arithmetic node. SDValue DAGTypeLegalizer::PromoteIntRes_Overflow(SDNode *N) { - // Simply change the return type of the boolean result. + // Change the return type of the boolean result while obeying + // getSetCCResultType. EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(1)); - EVT ValueVTs[] = { N->getValueType(0), NVT }; + EVT VT = N->getValueType(0); + EVT SVT = getSetCCResultType(VT); SDValue Ops[3] = { N->getOperand(0), N->getOperand(1) }; unsigned NumOps = N->getNumOperands(); assert(NumOps <= 3 && "Too many operands"); if (NumOps == 3) Ops[2] = N->getOperand(2); - SDValue Res = DAG.getNode(N->getOpcode(), SDLoc(N), - DAG.getVTList(ValueVTs), makeArrayRef(Ops, NumOps)); + SDLoc dl(N); + SDValue Res = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(VT, SVT), + makeArrayRef(Ops, NumOps)); // Modified the sum result - switch anything that used the old sum to use // the new one. ReplaceValueWith(SDValue(N, 0), Res); - return SDValue(Res.getNode(), 1); + // Convert to the expected type. + return DAG.getBoolExtOrTrunc(Res.getValue(1), dl, NVT, VT); } SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSAT(SDNode *N) { diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 514e6f59cb56..b9be29be61dd 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -8984,13 +8984,19 @@ std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); - SDVTList VTs = getVTList(ResEltVT, OvEltVT); + EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); + SDVTList VTs = getVTList(ResEltVT, SVT); SmallVector<SDValue, 8> ResScalars; SmallVector<SDValue, 8> OvScalars; for (unsigned i = 0; i < NE; ++i) { SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); + SDValue Ov = + getSelect(dl, OvEltVT, Res.getValue(1), + getBoolConstant(true, dl, OvEltVT, ResVT), + getConstant(0, dl, OvEltVT)); + ResScalars.push_back(Res); - OvScalars.push_back(SDValue(Res.getNode(), 1)); + OvScalars.push_back(Ov); } ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index cb7877eeb24c..ae7fd6c93b32 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -20209,8 +20209,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG); SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG); - if (Op->getValueType(1) != MVT::i8) - SetCC = DAG.getNode(ISD::ZERO_EXTEND, DL, Op->getValueType(1), SetCC); + assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!"); return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC); } diff --git a/llvm/test/CodeGen/AArch64/vec_uaddo.ll b/llvm/test/CodeGen/AArch64/vec_uaddo.ll index 6c2f07f8dc9b..6374196fe232 100644 --- a/llvm/test/CodeGen/AArch64/vec_uaddo.ll +++ b/llvm/test/CodeGen/AArch64/vec_uaddo.ll @@ -50,10 +50,8 @@ 
define <3 x i32> @uaddo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun ; CHECK-LABEL: uaddo_v3i32: ; CHECK: // %bb.0: ; CHECK-NEXT: add v1.4s, v0.4s, v1.4s -; CHECK-NEXT: cmhi v0.4s, v0.4s, v1.4s -; CHECK-NEXT: xtn v0.4h, v0.4s ; CHECK-NEXT: add x8, x0, #8 // =8 -; CHECK-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-NEXT: cmhi v0.4s, v0.4s, v1.4s ; CHECK-NEXT: st1 { v1.s }[2], [x8] ; CHECK-NEXT: str d1, [x0] ; CHECK-NEXT: ret @@ -70,8 +68,6 @@ define <4 x i32> @uaddo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) noun ; CHECK: // %bb.0: ; CHECK-NEXT: add v1.4s, v0.4s, v1.4s ; CHECK-NEXT: cmhi v0.4s, v0.4s, v1.4s -; CHECK-NEXT: xtn v0.4h, v0.4s -; CHECK-NEXT: sshll v0.4s, v0.4h, #0 ; CHECK-NEXT: str q1, [x0] ; CHECK-NEXT: ret %t = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a1) @@ -85,37 +81,33 @@ define <4 x i32> @uaddo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) noun define <6 x i32> @uaddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind { ; CHECK-LABEL: uaddo_v6i32: ; CHECK: // %bb.0: -; CHECK-NEXT: fmov s0, w6 -; CHECK-NEXT: mov x8, sp -; CHECK-NEXT: mov v0.s[1], w7 -; CHECK-NEXT: ldr s2, [sp, #16] -; CHECK-NEXT: ld1 { v0.s }[2], [x8] -; CHECK-NEXT: add x9, sp, #8 // =8 -; CHECK-NEXT: add x10, sp, #24 // =24 -; CHECK-NEXT: fmov s1, w0 -; CHECK-NEXT: ld1 { v2.s }[1], [x10] -; CHECK-NEXT: ld1 { v0.s }[3], [x9] -; CHECK-NEXT: mov v1.s[1], w1 -; CHECK-NEXT: fmov s3, w4 +; CHECK-NEXT: fmov s2, w6 +; CHECK-NEXT: ldr s0, [sp, #16] +; CHECK-NEXT: mov x9, sp +; CHECK-NEXT: mov v2.s[1], w7 +; CHECK-NEXT: ld1 { v2.s }[2], [x9] +; CHECK-NEXT: add x8, sp, #24 // =24 +; CHECK-NEXT: add x10, sp, #8 // =8 +; CHECK-NEXT: ld1 { v0.s }[1], [x8] +; CHECK-NEXT: fmov s3, w0 ; CHECK-NEXT: ldr x11, [sp, #32] -; CHECK-NEXT: mov v1.s[2], w2 -; CHECK-NEXT: mov v3.s[1], w5 -; CHECK-NEXT: mov v1.s[3], w3 -; CHECK-NEXT: add v2.4s, v3.4s, v2.4s +; CHECK-NEXT: ld1 { v2.s }[3], [x10] +; CHECK-NEXT: fmov s1, w4 +; CHECK-NEXT: mov v3.s[1], w1 +; CHECK-NEXT: mov v1.s[1], w5 +; CHECK-NEXT: mov v3.s[2], w2 +; CHECK-NEXT: mov v3.s[3], w3 ; CHECK-NEXT: add v0.4s, v1.4s, v0.4s -; CHECK-NEXT: cmhi v3.4s, v3.4s, v2.4s ; CHECK-NEXT: cmhi v1.4s, v1.4s, v0.4s -; CHECK-NEXT: str d2, [x11, #16] -; CHECK-NEXT: xtn v2.4h, v3.4s -; CHECK-NEXT: xtn v1.4h, v1.4s -; CHECK-NEXT: sshll v2.4s, v2.4h, #0 -; CHECK-NEXT: sshll v1.4s, v1.4h, #0 -; CHECK-NEXT: mov w5, v2.s[1] -; CHECK-NEXT: mov w1, v1.s[1] -; CHECK-NEXT: mov w2, v1.s[2] -; CHECK-NEXT: mov w3, v1.s[3] -; CHECK-NEXT: fmov w4, s2 -; CHECK-NEXT: fmov w0, s1 +; CHECK-NEXT: str d0, [x11, #16] +; CHECK-NEXT: add v0.4s, v3.4s, v2.4s +; CHECK-NEXT: cmhi v2.4s, v3.4s, v0.4s +; CHECK-NEXT: mov w5, v1.s[1] +; CHECK-NEXT: mov w1, v2.s[1] +; CHECK-NEXT: mov w2, v2.s[2] +; CHECK-NEXT: mov w3, v2.s[3] +; CHECK-NEXT: fmov w4, s1 +; CHECK-NEXT: fmov w0, s2 ; CHECK-NEXT: str q0, [x11] ; CHECK-NEXT: ret %t = call {<6 x i32>, <6 x i1>} @llvm.uadd.with.overflow.v6i32(<6 x i32> %a0, <6 x i32> %a1) @@ -129,14 +121,10 @@ define <6 x i32> @uaddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun define <8 x i32> @uaddo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind { ; CHECK-LABEL: uaddo_v8i32: ; CHECK: // %bb.0: -; CHECK-NEXT: add v3.4s, v1.4s, v3.4s ; CHECK-NEXT: add v2.4s, v0.4s, v2.4s -; CHECK-NEXT: cmhi v1.4s, v1.4s, v3.4s +; CHECK-NEXT: add v3.4s, v1.4s, v3.4s ; CHECK-NEXT: cmhi v0.4s, v0.4s, v2.4s -; CHECK-NEXT: xtn v1.4h, v1.4s -; CHECK-NEXT: xtn v0.4h, v0.4s -; CHECK-NEXT: sshll v0.4s, v0.4h, #0 -; CHECK-NEXT: 
sshll v1.4s, v1.4h, #0 +; CHECK-NEXT: cmhi v1.4s, v1.4s, v3.4s ; CHECK-NEXT: stp q2, q3, [x0] ; CHECK-NEXT: ret %t = call {<8 x i32>, <8 x i1>} @llvm.uadd.with.overflow.v8i32(<8 x i32> %a0, <8 x i32> %a1) diff --git a/llvm/test/CodeGen/AArch64/vec_umulo.ll b/llvm/test/CodeGen/AArch64/vec_umulo.ll index 766916c4b66a..c30d2bde3754 100644 --- a/llvm/test/CodeGen/AArch64/vec_umulo.ll +++ b/llvm/test/CodeGen/AArch64/vec_umulo.ll @@ -233,24 +233,24 @@ define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) noun define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind { ; CHECK-LABEL: umulo_v2i64: ; CHECK: // %bb.0: -; CHECK-NEXT: fmov x10, d1 -; CHECK-NEXT: fmov x11, d0 ; CHECK-NEXT: mov x8, v1.d[1] -; CHECK-NEXT: mov x9, v0.d[1] -; CHECK-NEXT: umulh x12, x11, x10 -; CHECK-NEXT: mul x10, x11, x10 -; CHECK-NEXT: cmp xzr, x12 -; CHECK-NEXT: umulh x11, x9, x8 -; CHECK-NEXT: mul x8, x9, x8 -; CHECK-NEXT: cset w9, ne +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: mov x10, v0.d[1] +; CHECK-NEXT: fmov x11, d0 +; CHECK-NEXT: umulh x12, x11, x9 +; CHECK-NEXT: mul x9, x11, x9 +; CHECK-NEXT: umulh x11, x10, x8 ; CHECK-NEXT: cmp xzr, x11 -; CHECK-NEXT: fmov d1, x10 -; CHECK-NEXT: fmov s0, w9 -; CHECK-NEXT: cset w9, ne -; CHECK-NEXT: mov v0.s[1], w9 +; CHECK-NEXT: csetm x11, ne +; CHECK-NEXT: cmp xzr, x12 +; CHECK-NEXT: csetm x12, ne +; CHECK-NEXT: fmov d0, x12 +; CHECK-NEXT: mul x8, x10, x8 +; CHECK-NEXT: fmov d1, x9 +; CHECK-NEXT: mov v0.d[1], x11 +; CHECK-NEXT: xtn v0.2s, v0.2d ; CHECK-NEXT: mov v1.d[1], x8 ; CHECK-NEXT: str q1, [x0] -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret %t = call {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64> %a0, <2 x i64> %a1) %val = extractvalue {<2 x i64>, <2 x i1>} %t, 0 @@ -276,19 +276,16 @@ define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun ; CHECK-NEXT: fmov w11, s0 ; CHECK-NEXT: cmeq v0.4s, v1.4s, #0 ; CHECK-NEXT: cmeq v1.4s, v2.4s, #0 -; CHECK-NEXT: mvn v0.16b, v0.16b -; CHECK-NEXT: mvn v1.16b, v1.16b -; CHECK-NEXT: xtn v0.4h, v0.4s -; CHECK-NEXT: xtn v1.4h, v1.4s ; CHECK-NEXT: sturh w8, [x0, #9] ; CHECK-NEXT: lsr w8, w8, #16 -; CHECK-NEXT: orr v0.8b, v1.8b, v0.8b +; CHECK-NEXT: mvn v0.16b, v0.16b +; CHECK-NEXT: mvn v1.16b, v1.16b ; CHECK-NEXT: strh w9, [x0, #6] ; CHECK-NEXT: sturh w10, [x0, #3] ; CHECK-NEXT: lsr w9, w9, #16 ; CHECK-NEXT: lsr w10, w10, #16 ; CHECK-NEXT: strb w8, [x0, #11] -; CHECK-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-NEXT: orr v0.16b, v1.16b, v0.16b ; CHECK-NEXT: lsr w8, w11, #16 ; CHECK-NEXT: strh w11, [x0] ; CHECK-NEXT: strb w9, [x0, #8] diff --git a/llvm/test/CodeGen/X86/vec_saddo.ll b/llvm/test/CodeGen/X86/vec_saddo.ll index 59abe0b99d02..2235ec2f5df6 100644 --- a/llvm/test/CodeGen/X86/vec_saddo.ll +++ b/llvm/test/CodeGen/X86/vec_saddo.ll @@ -581,12 +581,6 @@ define <6 x i32> @saddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0 ; AVX1-NEXT: vandps %ymm0, %ymm8, %ymm0 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 ; AVX1-NEXT: vmovq %xmm2, 16(%rdi) ; AVX1-NEXT: vmovdqa %xmm1, (%rdi) ; AVX1-NEXT: retq @@ -605,9 +599,6 @@ define <6 x i32> @saddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX2-NEXT: vpxor %ymm4, 
%ymm0, %ymm0 ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm5, %ymm0 ; AVX2-NEXT: vpandn %ymm3, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 ; AVX2-NEXT: vmovq %xmm2, 16(%rdi) ; AVX2-NEXT: vmovdqa %xmm1, (%rdi) @@ -695,21 +686,15 @@ define <8 x i32> @saddo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6 ; AVX1-NEXT: vpcmpeqd %xmm6, %xmm7, %xmm6 ; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm6 -; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpcmpgtd %xmm0, %xmm3, %xmm1 -; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpeqd %xmm1, %xmm4, %xmm1 -; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm1, %ymm1 -; AVX1-NEXT: vandps %ymm1, %ymm8, %ymm1 -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 -; AVX1-NEXT: vpmovsxwd %xmm1, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps %ymm2, (%rdi) +; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpcmpgtd %xmm1, %xmm3, %xmm0 +; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm0, %xmm4, %xmm0 +; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0 +; AVX1-NEXT: vandps %ymm0, %ymm8, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: vmovaps %ymm1, (%rdi) ; AVX1-NEXT: retq ; ; AVX2-LABEL: saddo_v8i32: @@ -726,9 +711,6 @@ define <8 x i32> @saddo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0 ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm5, %ymm0 ; AVX2-NEXT: vpandn %ymm3, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vmovdqa %ymm1, (%rdi) ; AVX2-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vec_smulo.ll b/llvm/test/CodeGen/X86/vec_smulo.ll index 6c26e690fe78..e4a6524ef24d 100644 --- a/llvm/test/CodeGen/X86/vec_smulo.ll +++ b/llvm/test/CodeGen/X86/vec_smulo.ll @@ -69,7 +69,9 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSE2-NEXT: movq %xmm1, %rsi ; SSE2-NEXT: xorl %eax, %eax ; SSE2-NEXT: imulq %rdx, %rsi -; SSE2-NEXT: seto %al +; SSE2-NEXT: movq $-1, %r9 +; SSE2-NEXT: movl $0, %edx +; SSE2-NEXT: cmovoq %r9, %rdx ; SSE2-NEXT: movq %rsi, %xmm1 ; SSE2-NEXT: imulq %r8, %rcx ; SSE2-NEXT: movq %rcx, %xmm0 @@ -85,9 +87,8 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSE2-NEXT: pand %xmm2, %xmm0 ; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 ; SSE2-NEXT: pxor %xmm0, %xmm2 -; SSE2-NEXT: movq %rax, %xmm0 -; SSE2-NEXT: seto %al -; SSE2-NEXT: movzbl %al, %eax +; SSE2-NEXT: movq %rdx, %xmm0 +; SSE2-NEXT: cmovoq %r9, %rax ; SSE2-NEXT: movq %rax, %xmm3 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; SSE2-NEXT: por %xmm2, %xmm0 @@ -115,7 +116,9 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSSE3-NEXT: movq %xmm1, %rsi ; SSSE3-NEXT: xorl %eax, %eax ; SSSE3-NEXT: imulq %rdx, %rsi -; SSSE3-NEXT: seto %al +; SSSE3-NEXT: movq $-1, %r9 +; SSSE3-NEXT: movl $0, %edx +; SSSE3-NEXT: cmovoq %r9, %rdx ; SSSE3-NEXT: movq %rsi, %xmm1 ; SSSE3-NEXT: imulq %r8, %rcx ; SSSE3-NEXT: movq %rcx, %xmm0 @@ -131,9 +134,8 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, 
<2 x i32> %a1, <2 x i32>* %p2) noun ; SSSE3-NEXT: pand %xmm2, %xmm0 ; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 ; SSSE3-NEXT: pxor %xmm0, %xmm2 -; SSSE3-NEXT: movq %rax, %xmm0 -; SSSE3-NEXT: seto %al -; SSSE3-NEXT: movzbl %al, %eax +; SSSE3-NEXT: movq %rdx, %xmm0 +; SSSE3-NEXT: cmovoq %r9, %rax ; SSSE3-NEXT: movq %rax, %xmm3 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; SSSE3-NEXT: por %xmm2, %xmm0 @@ -157,9 +159,10 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSE41-NEXT: pextrq $1, %xmm1, %rsi ; SSE41-NEXT: xorl %eax, %eax ; SSE41-NEXT: imulq %rdx, %rsi -; SSE41-NEXT: seto %al +; SSE41-NEXT: movq $-1, %r9 +; SSE41-NEXT: movl $0, %edx +; SSE41-NEXT: cmovoq %r9, %rdx ; SSE41-NEXT: movq %rsi, %xmm0 -; SSE41-NEXT: xorl %edx, %edx ; SSE41-NEXT: imulq %r8, %rcx ; SSE41-NEXT: movq %rcx, %xmm1 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] @@ -170,9 +173,9 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSE41-NEXT: pcmpeqq %xmm1, %xmm0 ; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 ; SSE41-NEXT: pxor %xmm0, %xmm2 -; SSE41-NEXT: movq %rax, %xmm3 -; SSE41-NEXT: seto %dl -; SSE41-NEXT: movq %rdx, %xmm0 +; SSE41-NEXT: movq %rdx, %xmm3 +; SSE41-NEXT: cmovoq %r9, %rax +; SSE41-NEXT: movq %rax, %xmm0 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; SSE41-NEXT: por %xmm2, %xmm0 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] @@ -193,9 +196,10 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; AVX1-NEXT: vpextrq $1, %xmm0, %rsi ; AVX1-NEXT: xorl %eax, %eax ; AVX1-NEXT: imulq %rdx, %rsi -; AVX1-NEXT: seto %al +; AVX1-NEXT: movq $-1, %r9 +; AVX1-NEXT: movl $0, %edx +; AVX1-NEXT: cmovoq %r9, %rdx ; AVX1-NEXT: vmovq %rsi, %xmm0 -; AVX1-NEXT: xorl %edx, %edx ; AVX1-NEXT: imulq %r8, %rcx ; AVX1-NEXT: vmovq %rcx, %xmm1 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] @@ -205,9 +209,9 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vmovq %rax, %xmm2 -; AVX1-NEXT: seto %dl -; AVX1-NEXT: vmovq %rdx, %xmm3 +; AVX1-NEXT: vmovq %rdx, %xmm2 +; AVX1-NEXT: cmovoq %r9, %rax +; AVX1-NEXT: vmovq %rax, %xmm3 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0] ; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] @@ -228,9 +232,10 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; AVX2-NEXT: vpextrq $1, %xmm0, %rsi ; AVX2-NEXT: xorl %eax, %eax ; AVX2-NEXT: imulq %rdx, %rsi -; AVX2-NEXT: seto %al +; AVX2-NEXT: movq $-1, %r9 +; AVX2-NEXT: movl $0, %edx +; AVX2-NEXT: cmovoq %r9, %rdx ; AVX2-NEXT: vmovq %rsi, %xmm0 -; AVX2-NEXT: xorl %edx, %edx ; AVX2-NEXT: imulq %r8, %rcx ; AVX2-NEXT: vmovq %rcx, %xmm1 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] @@ -240,9 +245,9 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vmovq %rax, %xmm2 -; AVX2-NEXT: seto %dl -; AVX2-NEXT: vmovq %rdx, %xmm3 +; AVX2-NEXT: vmovq %rdx, %xmm2 +; AVX2-NEXT: cmovoq %r9, %rax +; AVX2-NEXT: vmovq %rax, %xmm3 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0] ; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] @@ -766,11 +771,7 @@ define <6 x i32> 
@smulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX1-NEXT: vpsrad $31, %xmm1, %xmm0 ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm5, %xmm0 ; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; AVX1-NEXT: vmovq %xmm2, 16(%rdi) ; AVX1-NEXT: vmovdqa %xmm1, (%rdi) ; AVX1-NEXT: retq @@ -788,9 +789,6 @@ define <6 x i32> @smulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm2, %ymm0 ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 ; AVX2-NEXT: vmovq %xmm2, 16(%rdi) ; AVX2-NEXT: vmovdqa %xmm1, (%rdi) @@ -970,17 +968,13 @@ define <8 x i32> @smulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm6 ; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3] ; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7] -; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1 -; AVX1-NEXT: vpcmpeqd %xmm1, %xmm5, %xmm1 -; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 -; AVX1-NEXT: vpmovsxwd %xmm1, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps %ymm2, (%rdi) +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm0, %xmm5, %xmm0 +; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: vmovaps %ymm1, (%rdi) ; AVX1-NEXT: retq ; ; AVX2-LABEL: smulo_v8i32: @@ -996,9 +990,6 @@ define <8 x i32> @smulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm2, %ymm0 ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vmovdqa %ymm1, (%rdi) ; AVX2-NEXT: retq ; @@ -1741,11 +1732,12 @@ define <2 x i32> @smulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun ; SSE2-NEXT: movq %xmm0, %rsi ; SSE2-NEXT: xorl %eax, %eax ; SSE2-NEXT: imulq %rdx, %rsi -; SSE2-NEXT: seto %al -; SSE2-NEXT: movq %rax, %xmm0 -; SSE2-NEXT: xorl %eax, %eax +; SSE2-NEXT: movq $-1, %r9 +; SSE2-NEXT: movl $0, %edx +; SSE2-NEXT: cmovoq %r9, %rdx +; SSE2-NEXT: movq %rdx, %xmm0 ; SSE2-NEXT: imulq %r8, %rcx -; SSE2-NEXT: seto %al +; SSE2-NEXT: cmovoq %r9, %rax ; SSE2-NEXT: movq %rax, %xmm1 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE2-NEXT: movq %rsi, %xmm1 @@ -1764,11 +1756,12 @@ define <2 x i32> @smulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun ; SSSE3-NEXT: movq %xmm0, %rsi ; SSSE3-NEXT: xorl %eax, %eax ; SSSE3-NEXT: imulq %rdx, %rsi -; SSSE3-NEXT: seto %al -; SSSE3-NEXT: movq %rax, %xmm0 -; SSSE3-NEXT: xorl %eax, %eax +; SSSE3-NEXT: movq $-1, %r9 +; SSSE3-NEXT: movl $0, %edx +; SSSE3-NEXT: cmovoq %r9, %rdx +; SSSE3-NEXT: movq %rdx, %xmm0 ; SSSE3-NEXT: imulq %r8, %rcx -; SSSE3-NEXT: seto %al +; 
SSSE3-NEXT: cmovoq %r9, %rax ; SSSE3-NEXT: movq %rax, %xmm1 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSSE3-NEXT: movq %rsi, %xmm1 @@ -1785,11 +1778,12 @@ define <2 x i32> @smulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun ; SSE41-NEXT: pextrq $1, %xmm0, %rsi ; SSE41-NEXT: xorl %eax, %eax ; SSE41-NEXT: imulq %rdx, %rsi -; SSE41-NEXT: seto %al -; SSE41-NEXT: movq %rax, %xmm1 -; SSE41-NEXT: xorl %eax, %eax +; SSE41-NEXT: movq $-1, %r9 +; SSE41-NEXT: movl $0, %edx +; SSE41-NEXT: cmovoq %r9, %rdx +; SSE41-NEXT: movq %rdx, %xmm1 ; SSE41-NEXT: imulq %r8, %rcx -; SSE41-NEXT: seto %al +; SSE41-NEXT: cmovoq %r9, %rax ; SSE41-NEXT: movq %rax, %xmm0 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE41-NEXT: movq %rsi, %xmm1 @@ -1806,11 +1800,12 @@ define <2 x i32> @smulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun ; AVX1-NEXT: vpextrq $1, %xmm0, %rsi ; AVX1-NEXT: xorl %eax, %eax ; AVX1-NEXT: imulq %rdx, %rsi -; AVX1-NEXT: seto %al -; AVX1-NEXT: vmovq %rax, %xmm0 -; AVX1-NEXT: xorl %eax, %eax +; AVX1-NEXT: movq $-1, %r9 +; AVX1-NEXT: movl $0, %edx +; AVX1-NEXT: cmovoq %r9, %rdx +; AVX1-NEXT: vmovq %rdx, %xmm0 ; AVX1-NEXT: imulq %r8, %rcx -; AVX1-NEXT: seto %al +; AVX1-NEXT: cmovoq %r9, %rax ; AVX1-NEXT: vmovq %rax, %xmm1 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX1-NEXT: vmovq %rsi, %xmm1 @@ -1827,11 +1822,12 @@ define <2 x i32> @smulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun ; AVX2-NEXT: vpextrq $1, %xmm0, %rsi ; AVX2-NEXT: xorl %eax, %eax ; AVX2-NEXT: imulq %rdx, %rsi -; AVX2-NEXT: seto %al -; AVX2-NEXT: vmovq %rax, %xmm0 -; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: movq $-1, %r9 +; AVX2-NEXT: movl $0, %edx +; AVX2-NEXT: cmovoq %r9, %rdx +; AVX2-NEXT: vmovq %rdx, %xmm0 ; AVX2-NEXT: imulq %r8, %rcx -; AVX2-NEXT: seto %al +; AVX2-NEXT: cmovoq %r9, %rax ; AVX2-NEXT: vmovq %rax, %xmm1 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX2-NEXT: vmovq %rsi, %xmm1 @@ -2358,7 +2354,8 @@ define <4 x i32> @smulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind ; AVX512-NEXT: cmpb %r8b, %cl ; AVX512-NEXT: setne %cl ; AVX512-NEXT: orb %al, %cl -; AVX512-NEXT: kmovd %ecx, %k0 +; AVX512-NEXT: setne %al +; AVX512-NEXT: kmovd %eax, %k0 ; AVX512-NEXT: kshiftrw $1, %k0, %k1 ; AVX512-NEXT: movl %edx, %eax ; AVX512-NEXT: imulb %sil @@ -2370,7 +2367,8 @@ define <4 x i32> @smulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind ; AVX512-NEXT: cmpb %dl, %cl ; AVX512-NEXT: setne %cl ; AVX512-NEXT: orb %al, %cl -; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: setne %al +; AVX512-NEXT: kmovd %eax, %k2 ; AVX512-NEXT: kxorw %k2, %k1, %k1 ; AVX512-NEXT: kshiftlw $15, %k1, %k1 ; AVX512-NEXT: kshiftrw $14, %k1, %k1 @@ -2386,7 +2384,8 @@ define <4 x i32> @smulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind ; AVX512-NEXT: cmpb %sil, %cl ; AVX512-NEXT: setne %cl ; AVX512-NEXT: orb %al, %cl -; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: setne %al +; AVX512-NEXT: kmovd %eax, %k2 ; AVX512-NEXT: kxorw %k2, %k1, %k1 ; AVX512-NEXT: kshiftlw $15, %k1, %k1 ; AVX512-NEXT: kshiftrw $13, %k1, %k1 @@ -2403,7 +2402,8 @@ define <4 x i32> @smulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind ; AVX512-NEXT: cmpb %al, %bl ; AVX512-NEXT: setne %bl ; AVX512-NEXT: orb %cl, %bl -; AVX512-NEXT: kmovd %ebx, %k1 +; AVX512-NEXT: setne %cl +; AVX512-NEXT: kmovd %ecx, %k1 ; AVX512-NEXT: kshiftlw $3, %k1, %k1 ; AVX512-NEXT: korw %k1, %k0, %k1 ; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 diff --git 
a/llvm/test/CodeGen/X86/vec_ssubo.ll b/llvm/test/CodeGen/X86/vec_ssubo.ll index 80567d3ea72a..ac1c89f3ed6f 100644 --- a/llvm/test/CodeGen/X86/vec_ssubo.ll +++ b/llvm/test/CodeGen/X86/vec_ssubo.ll @@ -597,12 +597,6 @@ define <6 x i32> @ssubo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 ; AVX1-NEXT: vandps %ymm0, %ymm8, %ymm0 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 ; AVX1-NEXT: vmovq %xmm6, 16(%rdi) ; AVX1-NEXT: vmovdqa %xmm1, (%rdi) ; AVX1-NEXT: retq @@ -622,9 +616,6 @@ define <6 x i32> @ssubo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm5, %ymm0 ; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0 ; AVX2-NEXT: vpandn %ymm0, %ymm3, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 ; AVX2-NEXT: vmovq %xmm2, 16(%rdi) ; AVX2-NEXT: vmovdqa %xmm1, (%rdi) @@ -716,21 +707,15 @@ define <8 x i32> @ssubo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpcmpeqd %xmm4, %xmm7, %xmm4 ; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpcmpgtd %xmm0, %xmm3, %xmm1 -; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1 -; AVX1-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1 -; AVX1-NEXT: vandps %ymm1, %ymm8, %ymm1 -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm2 -; AVX1-NEXT: vpmovsxwd %xmm1, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps %ymm2, (%rdi) +; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpcmpgtd %xmm1, %xmm3, %xmm0 +; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX1-NEXT: vandps %ymm0, %ymm8, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm1, %ymm1 +; AVX1-NEXT: vmovaps %ymm1, (%rdi) ; AVX1-NEXT: retq ; ; AVX2-LABEL: ssubo_v8i32: @@ -748,9 +733,6 @@ define <8 x i32> @ssubo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm5, %ymm0 ; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0 ; AVX2-NEXT: vpandn %ymm0, %ymm3, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vmovdqa %ymm1, (%rdi) ; AVX2-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vec_uaddo.ll b/llvm/test/CodeGen/X86/vec_uaddo.ll index f902ee18ce04..93c3954afb7a 100644 --- a/llvm/test/CodeGen/X86/vec_uaddo.ll +++ b/llvm/test/CodeGen/X86/vec_uaddo.ll @@ -406,11 +406,7 @@ define <6 x i32> @uaddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX1-NEXT: vpmaxud %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 -; 
AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; AVX1-NEXT: vmovq %xmm2, 16(%rdi) ; AVX1-NEXT: vmovdqa %xmm1, (%rdi) ; AVX1-NEXT: retq @@ -422,9 +418,6 @@ define <6 x i32> @uaddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 ; AVX2-NEXT: vmovq %xmm2, 16(%rdi) ; AVX2-NEXT: vmovdqa %xmm1, (%rdi) @@ -507,12 +500,8 @@ define <8 x i32> @uaddo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX1-NEXT: vpmaxud %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 ; AVX1-NEXT: vmovaps %ymm1, (%rdi) ; AVX1-NEXT: retq ; @@ -523,9 +512,6 @@ define <8 x i32> @uaddo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vmovdqa %ymm1, (%rdi) ; AVX2-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vec_umulo.ll b/llvm/test/CodeGen/X86/vec_umulo.ll index 031eb9aa7f9e..6920e41055e8 100644 --- a/llvm/test/CodeGen/X86/vec_umulo.ll +++ b/llvm/test/CodeGen/X86/vec_umulo.ll @@ -63,16 +63,17 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSE2-NEXT: movq %xmm3, %r8 ; SSE2-NEXT: pand %xmm2, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] -; SSE2-NEXT: movq %xmm2, %r9 +; SSE2-NEXT: movq %xmm2, %r10 ; SSE2-NEXT: movq %xmm0, %rax -; SSE2-NEXT: movq %xmm1, %rcx +; SSE2-NEXT: movq %xmm1, %rdx ; SSE2-NEXT: xorl %esi, %esi -; SSE2-NEXT: mulq %rcx -; SSE2-NEXT: seto %sil +; SSE2-NEXT: mulq %rdx +; SSE2-NEXT: movq $-1, %r9 +; SSE2-NEXT: movl $0, %ecx +; SSE2-NEXT: cmovoq %r9, %rcx ; SSE2-NEXT: movq %rax, %xmm0 -; SSE2-NEXT: xorl %ecx, %ecx ; SSE2-NEXT: movq %r8, %rax -; SSE2-NEXT: mulq %r9 +; SSE2-NEXT: mulq %r10 ; SSE2-NEXT: movq %rax, %xmm1 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] @@ -83,9 +84,9 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSE2-NEXT: pand %xmm2, %xmm0 ; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 ; SSE2-NEXT: pxor %xmm0, %xmm2 -; SSE2-NEXT: movq %rsi, %xmm0 -; SSE2-NEXT: seto %cl -; SSE2-NEXT: movq %rcx, %xmm3 +; SSE2-NEXT: movq %rcx, %xmm0 +; SSE2-NEXT: cmovoq %r9, %rsi +; SSE2-NEXT: movq %rsi, %xmm3 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: movq %xmm1, (%rdi) @@ -99,16 +100,17 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSSE3-NEXT: movq %xmm3, %r8 ; SSSE3-NEXT: pand %xmm2, %xmm1 ; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] -; SSSE3-NEXT: movq %xmm2, %r9 +; SSSE3-NEXT: movq %xmm2, %r10 ; SSSE3-NEXT: movq %xmm0, %rax -; SSSE3-NEXT: movq %xmm1, %rcx +; SSSE3-NEXT: movq %xmm1, %rdx ; 
SSSE3-NEXT: xorl %esi, %esi -; SSSE3-NEXT: mulq %rcx -; SSSE3-NEXT: seto %sil +; SSSE3-NEXT: mulq %rdx +; SSSE3-NEXT: movq $-1, %r9 +; SSSE3-NEXT: movl $0, %ecx +; SSSE3-NEXT: cmovoq %r9, %rcx ; SSSE3-NEXT: movq %rax, %xmm0 -; SSSE3-NEXT: xorl %ecx, %ecx ; SSSE3-NEXT: movq %r8, %rax -; SSSE3-NEXT: mulq %r9 +; SSSE3-NEXT: mulq %r10 ; SSSE3-NEXT: movq %rax, %xmm1 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] @@ -119,9 +121,9 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSSE3-NEXT: pand %xmm2, %xmm0 ; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 ; SSSE3-NEXT: pxor %xmm0, %xmm2 -; SSSE3-NEXT: movq %rsi, %xmm0 -; SSSE3-NEXT: seto %cl -; SSSE3-NEXT: movq %rcx, %xmm3 +; SSSE3-NEXT: movq %rcx, %xmm0 +; SSSE3-NEXT: cmovoq %r9, %rsi +; SSSE3-NEXT: movq %rsi, %xmm3 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; SSSE3-NEXT: por %xmm2, %xmm0 ; SSSE3-NEXT: movq %xmm1, (%rdi) @@ -131,23 +133,24 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; SSE41: # %bb.0: ; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] -; SSE41-NEXT: movq %xmm0, %r9 +; SSE41-NEXT: movq %xmm0, %r8 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; SSE41-NEXT: movq %xmm1, %rsi +; SSE41-NEXT: movq %xmm1, %rcx ; SSE41-NEXT: pextrq $1, %xmm0, %rax ; SSE41-NEXT: pextrq $1, %xmm1, %rdx -; SSE41-NEXT: xorl %ecx, %ecx +; SSE41-NEXT: xorl %esi, %esi ; SSE41-NEXT: mulq %rdx -; SSE41-NEXT: movq %rax, %r8 -; SSE41-NEXT: seto %cl -; SSE41-NEXT: movq %rcx, %xmm0 -; SSE41-NEXT: xorl %ecx, %ecx -; SSE41-NEXT: movq %r9, %rax -; SSE41-NEXT: mulq %rsi -; SSE41-NEXT: seto %cl -; SSE41-NEXT: movq %rcx, %xmm1 +; SSE41-NEXT: movq %rax, %r9 +; SSE41-NEXT: movq $-1, %r10 +; SSE41-NEXT: movl $0, %eax +; SSE41-NEXT: cmovoq %r10, %rax +; SSE41-NEXT: movq %rax, %xmm0 +; SSE41-NEXT: movq %r8, %rax +; SSE41-NEXT: mulq %rcx +; SSE41-NEXT: cmovoq %r10, %rsi +; SSE41-NEXT: movq %rsi, %xmm1 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE41-NEXT: movq %r8, %xmm0 +; SSE41-NEXT: movq %r9, %xmm0 ; SSE41-NEXT: movq %rax, %xmm3 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,2,2,3] @@ -163,23 +166,24 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; AVX1: # %bb.0: ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] -; AVX1-NEXT: vmovq %xmm0, %r9 +; AVX1-NEXT: vmovq %xmm0, %r8 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; AVX1-NEXT: vmovq %xmm1, %rsi +; AVX1-NEXT: vmovq %xmm1, %rcx ; AVX1-NEXT: vpextrq $1, %xmm0, %rax ; AVX1-NEXT: vpextrq $1, %xmm1, %rdx -; AVX1-NEXT: xorl %ecx, %ecx +; AVX1-NEXT: xorl %esi, %esi ; AVX1-NEXT: mulq %rdx -; AVX1-NEXT: movq %rax, %r8 -; AVX1-NEXT: seto %cl -; AVX1-NEXT: vmovq %rcx, %xmm0 -; AVX1-NEXT: xorl %ecx, %ecx -; AVX1-NEXT: movq %r9, %rax -; AVX1-NEXT: mulq %rsi -; AVX1-NEXT: seto %cl -; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: movq %rax, %r9 +; AVX1-NEXT: movq $-1, %r10 +; AVX1-NEXT: movl $0, %eax +; AVX1-NEXT: cmovoq %r10, %rax +; AVX1-NEXT: vmovq %rax, %xmm0 +; AVX1-NEXT: movq %r8, %rax +; AVX1-NEXT: mulq %rcx +; AVX1-NEXT: cmovoq %r10, %rsi +; AVX1-NEXT: vmovq %rsi, %xmm1 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX1-NEXT: vmovq %r8, %xmm1 +; AVX1-NEXT: vmovq %r9, %xmm1 ; AVX1-NEXT: 
vmovq %rax, %xmm3 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0] ; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3 @@ -195,23 +199,24 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun ; AVX2: # %bb.0: ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] -; AVX2-NEXT: vmovq %xmm0, %r9 +; AVX2-NEXT: vmovq %xmm0, %r8 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; AVX2-NEXT: vmovq %xmm1, %rsi +; AVX2-NEXT: vmovq %xmm1, %rcx ; AVX2-NEXT: vpextrq $1, %xmm0, %rax ; AVX2-NEXT: vpextrq $1, %xmm1, %rdx -; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: xorl %esi, %esi ; AVX2-NEXT: mulq %rdx -; AVX2-NEXT: movq %rax, %r8 -; AVX2-NEXT: seto %cl -; AVX2-NEXT: vmovq %rcx, %xmm0 -; AVX2-NEXT: xorl %ecx, %ecx -; AVX2-NEXT: movq %r9, %rax -; AVX2-NEXT: mulq %rsi -; AVX2-NEXT: seto %cl -; AVX2-NEXT: vmovq %rcx, %xmm1 +; AVX2-NEXT: movq %rax, %r9 +; AVX2-NEXT: movq $-1, %r10 +; AVX2-NEXT: movl $0, %eax +; AVX2-NEXT: cmovoq %r10, %rax +; AVX2-NEXT: vmovq %rax, %xmm0 +; AVX2-NEXT: movq %r8, %rax +; AVX2-NEXT: mulq %rcx +; AVX2-NEXT: cmovoq %r10, %rsi +; AVX2-NEXT: vmovq %rsi, %xmm1 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX2-NEXT: vmovq %r8, %xmm1 +; AVX2-NEXT: vmovq %r9, %xmm1 ; AVX2-NEXT: vmovq %rax, %xmm3 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0] ; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3 @@ -649,18 +654,18 @@ define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; ; AVX1-LABEL: umulo_v6i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[1,1,3,3] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] -; AVX1-NEXT: vpmuludq %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm5 +; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2 +; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm5 ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3],xmm5[4,5],xmm2[6,7] ; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8 -; AVX1-NEXT: vpcmpeqd %xmm8, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm2, %xmm2 ; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6 -; AVX1-NEXT: vpxor %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,3,3] ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] ; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5 @@ -669,15 +674,12 @@ define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7] ; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5 ; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5 -; AVX1-NEXT: vpackssdw %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2 -; AVX1-NEXT: vpmovsxwd %xmm3, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3 -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 -; AVX1-NEXT: vmovq %xmm2, 16(%rdi) -; AVX1-NEXT: vmovdqa %xmm1, (%rdi) +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2 +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm1 +; AVX1-NEXT: vmovq %xmm1, 16(%rdi) +; AVX1-NEXT: vmovdqa %xmm0, (%rdi) +; AVX1-NEXT: vmovaps %ymm2, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: 
umulo_v6i32: @@ -692,13 +694,11 @@ define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3 ; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2 -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 -; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2 -; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm1 -; AVX2-NEXT: vpmovsxwd %xmm2, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vmovq %xmm2, 16(%rdi) -; AVX2-NEXT: vmovdqa %xmm1, (%rdi) +; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vmovq %xmm1, 16(%rdi) +; AVX2-NEXT: vmovdqa %xmm0, (%rdi) +; AVX2-NEXT: vmovdqa %ymm2, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: umulo_v6i32: @@ -826,18 +826,18 @@ define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; ; AVX1-LABEL: umulo_v8i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[1,1,3,3] ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] -; AVX1-NEXT: vpmuludq %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm5 +; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2 +; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm5 ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3],xmm5[4,5],xmm2[6,7] ; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8 -; AVX1-NEXT: vpcmpeqd %xmm8, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm2, %xmm2 ; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6 -; AVX1-NEXT: vpxor %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,3,3] ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] ; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5 @@ -846,15 +846,12 @@ define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7] ; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5 ; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5 -; AVX1-NEXT: vpackssdw %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2 +; AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1 -; AVX1-NEXT: vpmovsxwd %xmm3, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vmovaps %ymm1, (%rdi) +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 +; AVX1-NEXT: vmovaps %ymm0, (%rdi) +; AVX1-NEXT: vmovaps %ymm2, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: umulo_v8i32: @@ -869,11 +866,9 @@ define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3 ; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2 -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 -; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2 -; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm1 -; AVX2-NEXT: vpmovsxwd %xmm2, %ymm0 -; AVX2-NEXT: vmovdqa %ymm1, (%rdi) +; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vmovdqa %ymm0, (%rdi) +; AVX2-NEXT: vmovdqa %ymm2, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: umulo_v8i32: @@ -1526,12 +1521,13 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> 
%a1, <2 x i64>* %p2) noun ; SSE2-NEXT: xorl %ecx, %ecx ; SSE2-NEXT: mulq %rdx ; SSE2-NEXT: movq %rax, %r8 -; SSE2-NEXT: seto %cl -; SSE2-NEXT: movq %rcx, %xmm0 -; SSE2-NEXT: xorl %ecx, %ecx +; SSE2-NEXT: movq $-1, %r10 +; SSE2-NEXT: movl $0, %eax +; SSE2-NEXT: cmovoq %r10, %rax +; SSE2-NEXT: movq %rax, %xmm0 ; SSE2-NEXT: movq %r9, %rax ; SSE2-NEXT: mulq %rsi -; SSE2-NEXT: seto %cl +; SSE2-NEXT: cmovoq %r10, %rcx ; SSE2-NEXT: movq %rcx, %xmm1 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE2-NEXT: movq %r8, %xmm1 @@ -1551,12 +1547,13 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun ; SSSE3-NEXT: xorl %ecx, %ecx ; SSSE3-NEXT: mulq %rdx ; SSSE3-NEXT: movq %rax, %r8 -; SSSE3-NEXT: seto %cl -; SSSE3-NEXT: movq %rcx, %xmm0 -; SSSE3-NEXT: xorl %ecx, %ecx +; SSSE3-NEXT: movq $-1, %r10 +; SSSE3-NEXT: movl $0, %eax +; SSSE3-NEXT: cmovoq %r10, %rax +; SSSE3-NEXT: movq %rax, %xmm0 ; SSSE3-NEXT: movq %r9, %rax ; SSSE3-NEXT: mulq %rsi -; SSSE3-NEXT: seto %cl +; SSSE3-NEXT: cmovoq %r10, %rcx ; SSSE3-NEXT: movq %rcx, %xmm1 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSSE3-NEXT: movq %r8, %xmm1 @@ -1567,20 +1564,21 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun ; ; SSE41-LABEL: umulo_v2i64: ; SSE41: # %bb.0: -; SSE41-NEXT: movq %xmm0, %r9 -; SSE41-NEXT: movq %xmm1, %rsi +; SSE41-NEXT: movq %xmm0, %rcx +; SSE41-NEXT: movq %xmm1, %r9 ; SSE41-NEXT: pextrq $1, %xmm0, %rax ; SSE41-NEXT: pextrq $1, %xmm1, %rdx -; SSE41-NEXT: xorl %ecx, %ecx +; SSE41-NEXT: xorl %esi, %esi ; SSE41-NEXT: mulq %rdx ; SSE41-NEXT: movq %rax, %r8 -; SSE41-NEXT: seto %cl -; SSE41-NEXT: movq %rcx, %xmm1 -; SSE41-NEXT: xorl %ecx, %ecx -; SSE41-NEXT: movq %r9, %rax -; SSE41-NEXT: mulq %rsi -; SSE41-NEXT: seto %cl -; SSE41-NEXT: movq %rcx, %xmm0 +; SSE41-NEXT: movq $-1, %r10 +; SSE41-NEXT: movl $0, %eax +; SSE41-NEXT: cmovoq %r10, %rax +; SSE41-NEXT: movq %rax, %xmm1 +; SSE41-NEXT: movq %rcx, %rax +; SSE41-NEXT: mulq %r9 +; SSE41-NEXT: cmovoq %r10, %rsi +; SSE41-NEXT: movq %rsi, %xmm0 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE41-NEXT: movq %r8, %xmm1 ; SSE41-NEXT: movq %rax, %xmm2 @@ -1590,20 +1588,21 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun ; ; AVX1-LABEL: umulo_v2i64: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovq %xmm0, %r9 -; AVX1-NEXT: vmovq %xmm1, %rsi +; AVX1-NEXT: vmovq %xmm0, %rcx +; AVX1-NEXT: vmovq %xmm1, %r9 ; AVX1-NEXT: vpextrq $1, %xmm0, %rax ; AVX1-NEXT: vpextrq $1, %xmm1, %rdx -; AVX1-NEXT: xorl %ecx, %ecx +; AVX1-NEXT: xorl %esi, %esi ; AVX1-NEXT: mulq %rdx ; AVX1-NEXT: movq %rax, %r8 -; AVX1-NEXT: seto %cl -; AVX1-NEXT: vmovq %rcx, %xmm0 -; AVX1-NEXT: xorl %ecx, %ecx -; AVX1-NEXT: movq %r9, %rax -; AVX1-NEXT: mulq %rsi -; AVX1-NEXT: seto %cl -; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: movq $-1, %r10 +; AVX1-NEXT: movl $0, %eax +; AVX1-NEXT: cmovoq %r10, %rax +; AVX1-NEXT: vmovq %rax, %xmm0 +; AVX1-NEXT: movq %rcx, %rax +; AVX1-NEXT: mulq %r9 +; AVX1-NEXT: cmovoq %r10, %rsi +; AVX1-NEXT: vmovq %rsi, %xmm1 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX1-NEXT: vmovq %r8, %xmm1 ; AVX1-NEXT: vmovq %rax, %xmm2 @@ -1613,20 +1612,21 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun ; ; AVX2-LABEL: umulo_v2i64: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovq %xmm0, %r9 -; AVX2-NEXT: vmovq %xmm1, %rsi +; AVX2-NEXT: vmovq %xmm0, %rcx +; AVX2-NEXT: vmovq %xmm1, %r9 ; AVX2-NEXT: vpextrq $1, %xmm0, %rax ; AVX2-NEXT: vpextrq $1, %xmm1, %rdx -; 
AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: xorl %esi, %esi ; AVX2-NEXT: mulq %rdx ; AVX2-NEXT: movq %rax, %r8 -; AVX2-NEXT: seto %cl -; AVX2-NEXT: vmovq %rcx, %xmm0 -; AVX2-NEXT: xorl %ecx, %ecx -; AVX2-NEXT: movq %r9, %rax -; AVX2-NEXT: mulq %rsi -; AVX2-NEXT: seto %cl -; AVX2-NEXT: vmovq %rcx, %xmm1 +; AVX2-NEXT: movq $-1, %r10 +; AVX2-NEXT: movl $0, %eax +; AVX2-NEXT: cmovoq %r10, %rax +; AVX2-NEXT: vmovq %rax, %xmm0 +; AVX2-NEXT: movq %rcx, %rax +; AVX2-NEXT: mulq %r9 +; AVX2-NEXT: cmovoq %r10, %rsi +; AVX2-NEXT: vmovq %rsi, %xmm1 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX2-NEXT: vmovq %r8, %xmm1 ; AVX2-NEXT: vmovq %rax, %xmm2 @@ -2090,7 +2090,8 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind ; AVX512-NEXT: testb $-2, %r8b ; AVX512-NEXT: setne %cl ; AVX512-NEXT: orb %al, %cl -; AVX512-NEXT: kmovd %ecx, %k0 +; AVX512-NEXT: setne %al +; AVX512-NEXT: kmovd %eax, %k0 ; AVX512-NEXT: kshiftrw $1, %k0, %k1 ; AVX512-NEXT: movl %edx, %eax ; AVX512-NEXT: mulb %sil @@ -2099,7 +2100,8 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind ; AVX512-NEXT: testb $-2, %dl ; AVX512-NEXT: setne %cl ; AVX512-NEXT: orb %al, %cl -; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: setne %al +; AVX512-NEXT: kmovd %eax, %k2 ; AVX512-NEXT: kxorw %k2, %k1, %k1 ; AVX512-NEXT: kshiftlw $15, %k1, %k1 ; AVX512-NEXT: kshiftrw $14, %k1, %k1 @@ -2112,7 +2114,8 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind ; AVX512-NEXT: testb $-2, %sil ; AVX512-NEXT: setne %cl ; AVX512-NEXT: orb %al, %cl -; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: setne %al +; AVX512-NEXT: kmovd %eax, %k2 ; AVX512-NEXT: kxorw %k2, %k1, %k1 ; AVX512-NEXT: kshiftlw $15, %k1, %k1 ; AVX512-NEXT: kshiftrw $13, %k1, %k1 @@ -2126,7 +2129,8 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind ; AVX512-NEXT: testb $-2, %al ; AVX512-NEXT: setne %bl ; AVX512-NEXT: orb %cl, %bl -; AVX512-NEXT: kmovd %ebx, %k1 +; AVX512-NEXT: setne %cl +; AVX512-NEXT: kmovd %ecx, %k1 ; AVX512-NEXT: kshiftlw $3, %k1, %k1 ; AVX512-NEXT: korw %k1, %k0, %k1 ; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vec_usubo.ll b/llvm/test/CodeGen/X86/vec_usubo.ll index 6aff08e74563..5c843dc504ca 100644 --- a/llvm/test/CodeGen/X86/vec_usubo.ll +++ b/llvm/test/CodeGen/X86/vec_usubo.ll @@ -422,11 +422,7 @@ define <6 x i32> @usubo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX1-NEXT: vpminud %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; AVX1-NEXT: vmovq %xmm2, 16(%rdi) ; AVX1-NEXT: vmovdqa %xmm1, (%rdi) ; AVX1-NEXT: retq @@ -438,9 +434,6 @@ define <6 x i32> @usubo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 ; AVX2-NEXT: vmovq %xmm2, 16(%rdi) ; AVX2-NEXT: vmovdqa %xmm1, (%rdi) @@ -531,12 +524,8 @@ define <8 x i32> @usubo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; 
AVX1-NEXT: vpminud %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 ; AVX1-NEXT: vmovaps %ymm1, (%rdi) ; AVX1-NEXT: retq ; @@ -547,9 +536,6 @@ define <8 x i32> @usubo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 ; AVX2-NEXT: vmovdqa %ymm1, (%rdi) ; AVX2-NEXT: retq ;
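Note (appended for context; not part of the patch): the codegen deltas above all stem from one representational change. The promoted overflow flag now carries the target's setcc boolean encoding (0/all-ones lane masks on vector targets, a 0/1 bit on scalar ones) instead of always being zero-extended to 0/1, and getBoolExtOrTrunc, or the select against getBoolConstant in the unrolled path, re-encodes it only where the two encodings actually differ. Below is a minimal standalone sketch of that re-encoding; BooleanContent and convertBool here are illustrative stand-ins, not the SelectionDAG API.

#include <cassert>
#include <cstdint>

// The two boolean encodings involved: scalar flag materialization
// (seto/cset) produces 0 or 1, while vector compares (cmhi, vpcmpgtd)
// produce 0 or all-ones in each lane.
enum class BooleanContent { ZeroOrOne, ZeroOrNegativeOne };

// Re-encode a boolean into the requested content style while preserving
// its truth value -- the semantic effect of DAG.getBoolExtOrTrunc.
int64_t convertBool(int64_t V, BooleanContent To) {
  bool Truth = V != 0; // any nonzero encoding means "overflowed"
  if (To == BooleanContent::ZeroOrNegativeOne)
    return Truth ? -1 : 0; // sign-extended lane mask
  return Truth ? 1 : 0;    // zero-extended scalar bit
}

int main() {
  // An all-ones compare lane narrows to the scalar bit 1, and a 0/1 bit
  // widens to a 0/-1 mask; no information is lost in either direction.
  assert(convertBool(-1, BooleanContent::ZeroOrOne) == 1);
  assert(convertBool(1, BooleanContent::ZeroOrNegativeOne) == -1);
  assert(convertBool(0, BooleanContent::ZeroOrNegativeOne) == 0);
  return 0;
}

This is why the AArch64 tests lose their xtn/sshll pairs and the AVX tests lose their vpackssdw/vpmovsxwd round-trips: once producer and consumer agree on the 0/all-ones encoding, the conversion folds away entirely, and the scalar paths can use cmov to build the mask directly.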