[X86][SSE] Always enable ISD::SRL -> ISD::MULHU for v8i16

For constant non-uniform cases we'll never introduce more and/andn/or selects than already occur in generic pre-SSE41 ISD::SRL lowering.

llvm-svn: 342352
Simon Pilgrim 2018-09-16 20:28:38 +00:00
parent 9e3818af26
commit cffa206423
3 changed files with 38 additions and 84 deletions
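The lowering touched below relies on the identity that, for a 16-bit lane, a logical right shift by a constant c in [1,15] equals the high half of an unsigned 16x16-bit multiply by the scale factor 2^(16-c), which is exactly what PMULHUW computes per lane. The following standalone C++ check of that identity is not part of the commit; mulhu16 is a hypothetical helper that models a single PMULHUW lane.

// Standalone sanity check (not part of the commit): for 16-bit x and a
// constant shift amount c in [1,15], x >> c equals the high 16 bits of
// x * 2^(16-c), i.e. one PMULHUW lane with scale 2^(16-c).
#include <cassert>
#include <cstdint>

// High 16 bits of the unsigned 16x16->32 product: models one pmulhuw lane.
static uint16_t mulhu16(uint16_t x, uint16_t y) {
  return static_cast<uint16_t>((uint32_t(x) * uint32_t(y)) >> 16);
}

int main() {
  for (uint32_t x = 0; x <= 0xFFFF; ++x)
    for (unsigned c = 1; c <= 15; ++c) {
      uint16_t scale = static_cast<uint16_t>(1u << (16 - c)); // 2^(16-c)
      assert(static_cast<uint16_t>(x >> c) ==
             mulhu16(static_cast<uint16_t>(x), scale));
    }
  // c == 0 has no 16-bit scale (2^16 does not fit), so zero-amount lanes
  // have to be blended through from the unshifted input instead.
  return 0;
}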

View File

@@ -23897,7 +23897,6 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
   // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
   // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
   if (Opc == ISD::SRL && ConstantAmt &&
-      (Subtarget.hasSSE41() || DAG.isKnownNeverZero(Amt)) &&
       (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
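
With the hasSSE41()/isKnownNeverZero(Amt) guard removed, a constant amount vector may now contain zero lanes on plain SSE2 as well. A zero amount has no 16-bit scale factor (2^16 does not fit), so those lanes are copied back from the unshifted input with the usual SSE2 and/andn/or blend, which is what the updated check lines below show (masks such as [0,65535,...]). A rough scalar model of that blend, not taken from LLVM, is sketched here; the generic pre-SSE41 SRL lowering needs one such blend per shift-amount bit, while the MULHU path needs at most this single one, which is the cost argument in the commit message.

// Rough scalar model (not LLVM code) of the SSE2 mask blend:
//   result = (mask & shifted) | (~mask & original)   // pand / pandn / por
#include <array>
#include <cstdint>

using V8 = std::array<uint16_t, 8>;

static V8 blend(const V8 &mask, const V8 &shifted, const V8 &original) {
  V8 r{};
  for (int i = 0; i < 8; ++i)
    r[i] = uint16_t((mask[i] & shifted[i]) |
                    (uint16_t(~mask[i]) & original[i]));
  return r;
}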

View File

@@ -592,43 +592,26 @@ define <8 x i16> @combine_vec_udiv_uniform(<8 x i16> %x) {
 define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) {
 ; SSE2-LABEL: combine_vec_udiv_nonuniform:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm1
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: psrlw $3, %xmm3
-; SSE2-NEXT: pandn %xmm3, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1
-; SSE2-NEXT: psubw %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm3, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm2
+; SSE2-NEXT: psubw %xmm2, %xmm0
 ; SSE2-NEXT: movl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: pmulhuw %xmm0, %xmm2
-; SSE2-NEXT: paddw %xmm1, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,0,65535,65535,0,0,65535]
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pmulhuw %xmm0, %xmm1
+; SSE2-NEXT: paddw %xmm2, %xmm1
 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,65535,65535,0]
 ; SSE2-NEXT: movdqa %xmm0, %xmm2
 ; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1
 ; SSE2-NEXT: pand %xmm0, %xmm1
 ; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,65535]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: psrlw $2, %xmm1
-; SSE2-NEXT: pandn %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,0,0,65535,65535,0]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: combine_vec_udiv_nonuniform:
@@ -867,33 +850,21 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) {
 define <8 x i16> @pr38477(<8 x i16> %a0) {
 ; SSE2-LABEL: pr38477:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,4957,57457,4103,16385,35545,2048,2115]
-; SSE2-NEXT: pmulhuw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psubw %xmm1, %xmm3
-; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm3
-; SSE2-NEXT: paddw %xmm1, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,4957,57457,4103,16385,35545,2048,2115]
+; SSE2-NEXT: pmulhuw %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psubw %xmm2, %xmm1
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE2-NEXT: paddw %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,0,65535]
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm1, %xmm3
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1
 ; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,0,65535]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,0,65535,65535,0,0]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: psrlw $2, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pandn %xmm0, %xmm2
 ; SSE2-NEXT: por %xmm2, %xmm1
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: retq

View File

@@ -919,20 +919,12 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
 define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
 ; SSE2-LABEL: constant_shift_v8i16:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $4, %xmm1
-; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; SSE2-NEXT: movapd %xmm1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[2,3]
-; SSE2-NEXT: psrlw $2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: movaps {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
-; SSE2-NEXT: movaps %xmm2, %xmm0
-; SSE2-NEXT: andps %xmm1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm1
-; SSE2-NEXT: orps %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: constant_shift_v8i16:
@@ -981,20 +973,12 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
 ;
 ; X32-SSE-LABEL: constant_shift_v8i16:
 ; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; X32-SSE-NEXT: movapd %xmm1, %xmm2
-; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[2,3]
-; X32-SSE-NEXT: psrlw $2, %xmm1
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; X32-SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; X32-SSE-NEXT: movaps {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
-; X32-SSE-NEXT: movaps %xmm2, %xmm0
-; X32-SSE-NEXT: andps %xmm1, %xmm0
-; X32-SSE-NEXT: psrlw $1, %xmm2
-; X32-SSE-NEXT: andnps %xmm2, %xmm1
-; X32-SSE-NEXT: orps %xmm1, %xmm0
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
+; X32-SSE-NEXT: movdqa %xmm1, %xmm2
+; X32-SSE-NEXT: pandn %xmm0, %xmm2
+; X32-SSE-NEXT: pmulhuw {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: pand %xmm1, %xmm0
+; X32-SSE-NEXT: por %xmm2, %xmm0
 ; X32-SSE-NEXT: retl
   %shift = lshr <8 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
   ret <8 x i16> %shift
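
For reference, a scalar model of what the new SSE2/X32-SSE sequence computes for this test is sketched below; it is not the compiler's code. The pmulhuw operand is elided as {{.*}}(%rip) / {{\.LCPI.*}} in the check lines, so the per-lane scale values here are simply derived from the 2^(16-c) rule, and lane 0 (shift by 0) is the lane the [0,65535,...] mask keeps from the input via the pand/pandn/por blend.

// Scalar sketch (assumes the 2^(16-c) scale rule; not LLVM code) of
// lshr <8 x i16> %a, <0,1,2,3,4,5,6,7> via one PMULHUW plus a blend.
#include <array>
#include <cstdint>

using V8 = std::array<uint16_t, 8>;

static V8 lshr_0_to_7(const V8 &a) {
  const std::array<unsigned, 8> amt = {0, 1, 2, 3, 4, 5, 6, 7};
  V8 r{};
  for (int i = 0; i < 8; ++i) {
    if (amt[i] == 0) {
      r[i] = a[i]; // lane 0 is kept from the input by the mask blend
    } else {
      uint16_t scale = uint16_t(1u << (16 - amt[i]));  // 32768, 16384, ..., 512
      r[i] = uint16_t((uint32_t(a[i]) * scale) >> 16); // one pmulhuw lane
    }
  }
  return r;
}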