[X86][SSE] Attempt to pre-truncate arithmetic operations that have already been extended

As discussed on D28219, it is profitable to combine trunc(binop(s/zext(x), s/zext(y))) to binop(trunc(s/zext(x)), trunc(s/zext(y))), assuming the trunc(ext()) will simplify further.

llvm-svn: 292493
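For illustration only (an editor's sketch, not part of the original commit message), this is the kind of IR the fold narrows, assuming the truncated type is legal for the target and the extensions come from a type no wider than the truncation; the function name is hypothetical:

define <8 x i16> @pretrunc_add_example(<8 x i16> %x, <8 x i16> %y) {
  %xe = sext <8 x i16> %x to <8 x i32>
  %ye = sext <8 x i16> %y to <8 x i32>
  %s = add <8 x i32> %xe, %ye
  ; trunc(add(sext(x), sext(y))) -> add(trunc(sext(x)), trunc(sext(y))) -> add(x, y)
  %t = trunc <8 x i32> %s to <8 x i16>
  ret <8 x i16> %t
}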
Simon Pilgrim 2017-01-19 16:25:02 +00:00
parent 291c3d8ff2
commit 5f2f53b106
2 changed files with 97 additions and 235 deletions


@@ -32348,13 +32348,23 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
   EVT VT = N->getValueType(0);
   EVT SrcVT = Src.getValueType();
 
-  auto IsRepeatedOpOrOneUseConstant = [](SDValue Op0, SDValue Op1) {
-    // TODO: Add extra cases where we can truncate both inputs for the
-    // cost of one (or none).
-    // e.g. TRUNC( BINOP( EXT( X ), EXT( Y ) ) ) --> BINOP( X, Y )
+  auto IsRepeatedOpOrFreeTruncation = [VT](SDValue Op0, SDValue Op1) {
+    unsigned TruncSizeInBits = VT.getScalarSizeInBits();
+
     if (Op0 == Op1)
       return true;
 
+    unsigned Opcode0 = Op0.getOpcode();
+    if ((Opcode0 == ISD::ANY_EXTEND || Opcode0 == ISD::SIGN_EXTEND ||
+         Opcode0 == ISD::ZERO_EXTEND) &&
+        Op0.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
+      return true;
+
+    unsigned Opcode1 = Op1.getOpcode();
+    if ((Opcode1 == ISD::ANY_EXTEND || Opcode1 == ISD::SIGN_EXTEND ||
+         Opcode1 == ISD::ZERO_EXTEND) &&
+        Op1.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
+      return true;
+
     SDValue BC0 = peekThroughOneUseBitcasts(Op0);
     SDValue BC1 = peekThroughOneUseBitcasts(Op1);
     return ISD::isBuildVectorOfConstantSDNodes(BC0.getNode()) ||
@@ -32386,7 +32396,7 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
     SDValue Op0 = Src.getOperand(0);
     SDValue Op1 = Src.getOperand(1);
     if (TLI.isOperationLegalOrPromote(Opcode, VT) &&
-        IsRepeatedOpOrOneUseConstant(Op0, Op1))
+        IsRepeatedOpOrFreeTruncation(Op0, Op1))
       return TruncateArithmetic(Op0, Op1);
     break;
   }
@@ -32402,7 +32412,7 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
     SDValue Op0 = Src.getOperand(0);
     SDValue Op1 = Src.getOperand(1);
     if (TLI.isOperationLegal(Opcode, VT) &&
-        IsRepeatedOpOrOneUseConstant(Op0, Op1))
+        IsRepeatedOpOrFreeTruncation(Op0, Op1))
       return TruncateArithmetic(Op0, Op1);
     break;
   }


@@ -415,54 +415,43 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
 ; SSE-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
 ; SSE:       # BB#0:
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3]
-; SSE-NEXT:    psrad $24, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE-NEXT:    pslld $16, %xmm2
+; SSE-NEXT:    psrad $16, %xmm2
+; SSE-NEXT:    pslld $16, %xmm1
+; SSE-NEXT:    psrad $16, %xmm1
+; SSE-NEXT:    packssdw %xmm2, %xmm1
 ; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE-NEXT:    psrad $24, %xmm0
-; SSE-NEXT:    paddd %xmm2, %xmm0
-; SSE-NEXT:    paddd %xmm1, %xmm3
-; SSE-NEXT:    pslld $16, %xmm3
-; SSE-NEXT:    psrad $16, %xmm3
-; SSE-NEXT:    pslld $16, %xmm0
-; SSE-NEXT:    psrad $16, %xmm0
-; SSE-NEXT:    packssdw %xmm0, %xmm3
-; SSE-NEXT:    movdqa %xmm3, %xmm0
+; SSE-NEXT:    psraw $8, %xmm0
+; SSE-NEXT:    paddw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT:    vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
-; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT:    vpmovsxbw %xmm0, %xmm0
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vpmovsxbd %xmm0, %ymm0
-; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512-NEXT:    vpmovsxbw %xmm0, %xmm0
+; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 %2 = sext <8 x i8> %1 to <8 x i32>
@@ -2308,61 +2297,41 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
 ; SSE:       # BB#0:
 ; SSE-NEXT:    pxor %xmm3, %xmm3
 ; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE-NEXT:    pmuludq %xmm1, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT:    pmuludq %xmm3, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
-; SSE-NEXT:    pmuludq %xmm2, %xmm4
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE-NEXT:    pmuludq %xmm1, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE-NEXT:    pslld $16, %xmm3
-; SSE-NEXT:    psrad $16, %xmm3
-; SSE-NEXT:    pslld $16, %xmm0
-; SSE-NEXT:    psrad $16, %xmm0
-; SSE-NEXT:    packssdw %xmm3, %xmm0
+; SSE-NEXT:    pslld $16, %xmm2
+; SSE-NEXT:    psrad $16, %xmm2
+; SSE-NEXT:    pslld $16, %xmm1
+; SSE-NEXT:    psrad $16, %xmm1
+; SSE-NEXT:    packssdw %xmm2, %xmm1
+; SSE-NEXT:    pmullw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpmulld %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpmulld %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
 ; AVX512:       # BB#0:
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512-NEXT:    vpmovdw %zmm1, %ymm1
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
 %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 %2 = zext <8 x i8> %1 to <8 x i32>
@@ -5131,38 +5100,11 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: mul_add_const_v4i64_v4i32:
-; AVX1:       # BB#0:
-; AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: mul_add_const_v4i64_v4i32:
-; AVX2:       # BB#0:
-; AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    retq
-;
-; AVX512F-LABEL: mul_add_const_v4i64_v4i32:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: mul_add_const_v4i64_v4i32:
-; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BW-NEXT:    retq
-;
-; AVX512DQ-LABEL: mul_add_const_v4i64_v4i32:
-; AVX512DQ:       # BB#0:
-; AVX512DQ-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512DQ-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512DQ-NEXT:    vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512DQ-NEXT:    retq
+; AVX-LABEL: mul_add_const_v4i64_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
 %1 = sext <4 x i32> %a0 to <4 x i64>
 %2 = sext <4 x i32> %a1 to <4 x i64>
 %3 = mul <4 x i64> %1, %2
@@ -5213,38 +5155,11 @@ define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: mul_add_self_v4i64_v4i32:
-; AVX1:       # BB#0:
-; AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: mul_add_self_v4i64_v4i32:
-; AVX2:       # BB#0:
-; AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    retq
-;
-; AVX512F-LABEL: mul_add_self_v4i64_v4i32:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: mul_add_self_v4i64_v4i32:
-; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
-; AVX512BW-NEXT:    retq
-;
-; AVX512DQ-LABEL: mul_add_self_v4i64_v4i32:
-; AVX512DQ:       # BB#0:
-; AVX512DQ-NEXT:    vpmovsxdq %xmm0, %ymm0
-; AVX512DQ-NEXT:    vpmovsxdq %xmm1, %ymm1
-; AVX512DQ-NEXT:    vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
-; AVX512DQ-NEXT:    retq
+; AVX-LABEL: mul_add_self_v4i64_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
 %1 = sext <4 x i32> %a0 to <4 x i64>
 %2 = sext <4 x i32> %a1 to <4 x i64>
 %3 = mul <4 x i64> %1, %2
@@ -5256,102 +5171,39 @@ define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind { define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: mul_add_multiuse_v4i64_v4i32: ; SSE-LABEL: mul_add_multiuse_v4i64_v4i32:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
; SSE-NEXT: psrad $31, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,1,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pmuludq %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm3, %xmm5 ; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: pmuludq %xmm4, %xmm5 ; SSE-NEXT: psrlq $32, %xmm5
; SSE-NEXT: psrlq $32, %xmm4 ; SSE-NEXT: pmuludq %xmm1, %xmm5
; SSE-NEXT: pmuludq %xmm3, %xmm4 ; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: paddq %xmm2, %xmm4
; SSE-NEXT: psllq $32, %xmm4
; SSE-NEXT: movdqa %xmm0, %xmm6
; SSE-NEXT: psrlq $32, %xmm6 ; SSE-NEXT: psrlq $32, %xmm6
; SSE-NEXT: pmuludq %xmm1, %xmm6 ; SSE-NEXT: pmuludq %xmm3, %xmm6
; SSE-NEXT: movdqa %xmm0, %xmm2 ; SSE-NEXT: paddq %xmm5, %xmm6
; SSE-NEXT: pmuludq %xmm1, %xmm2 ; SSE-NEXT: psllq $32, %xmm6
; SSE-NEXT: pmuludq %xmm1, %xmm3
; SSE-NEXT: paddq %xmm6, %xmm3
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: psrlq $32, %xmm1 ; SSE-NEXT: psrlq $32, %xmm1
; SSE-NEXT: pmuludq %xmm0, %xmm1 ; SSE-NEXT: pmuludq %xmm4, %xmm1
; SSE-NEXT: paddq %xmm6, %xmm1 ; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: psllq $32, %xmm1 ; SSE-NEXT: psrlq $32, %xmm5
; SSE-NEXT: paddq %xmm0, %xmm1 ; SSE-NEXT: pmuludq %xmm2, %xmm5
; SSE-NEXT: paddq %xmm1, %xmm2 ; SSE-NEXT: paddq %xmm1, %xmm5
; SSE-NEXT: paddq %xmm3, %xmm4 ; SSE-NEXT: psllq $32, %xmm5
; SSE-NEXT: paddq %xmm5, %xmm4 ; SSE-NEXT: pmuludq %xmm4, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2] ; SSE-NEXT: paddq %xmm5, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm0 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; SSE-NEXT: paddd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX1:       # BB#0:
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT:    vpmovsxdq %xmm1, %xmm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmuldq %xmm3, %xmm2, %xmm3
-; AVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[0,2]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX2:       # BB#0:
-; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT:    vpmovsxdq %xmm1, %ymm1
-; AVX2-NEXT:    vpmuldq %ymm1, %ymm0, %ymm1
-; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
-;
-; AVX512F-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vpmovsxdq %xmm0, %ymm0
-; AVX512F-NEXT:    vpmovsxdq %xmm1, %ymm1
-; AVX512F-NEXT:    vpmuldq %ymm1, %ymm0, %ymm1
-; AVX512F-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vpmovqd %zmm0, %ymm0
-; AVX512F-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vpmovsxdq %xmm0, %ymm0
-; AVX512BW-NEXT:    vpmovsxdq %xmm1, %ymm1
-; AVX512BW-NEXT:    vpmuldq %ymm1, %ymm0, %ymm1
-; AVX512BW-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT:    vpmovqd %zmm0, %ymm0
-; AVX512BW-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512BW-NEXT:    retq
-;
-; AVX512DQ-LABEL: mul_add_multiuse_v4i64_v4i32:
-; AVX512DQ:       # BB#0:
-; AVX512DQ-NEXT:    vpmovsxdq %xmm0, %ymm0
-; AVX512DQ-NEXT:    vpmovsxdq %xmm1, %ymm1
-; AVX512DQ-NEXT:    vpmullq %zmm1, %zmm0, %zmm1
-; AVX512DQ-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512DQ-NEXT:    retq
+; AVX-LABEL: mul_add_multiuse_v4i64_v4i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
 %1 = sext <4 x i32> %a0 to <4 x i64>
 %2 = sext <4 x i32> %a1 to <4 x i64>
 %3 = mul <4 x i64> %1, %2