[X86] Allow movmskpd/ps ISD nodes to be created and selected with integer input types.

This removes an int->fp bitcast between the surrounding code and the movmsk. I had already added a hack to combineMOVMSK to try to look through this bitcast to improve the SimplifyDemandedBits there.

But I found an additional issue where the bitcast was preventing combineMOVMSK from being called again after earlier nodes in the DAG are optimized. The bitcast gets revisited, but not the user of the bitcast. By using integer types throughout, the bitcast doesn't get in the way.

llvm-svn: 343046
This commit is contained in:
Craig Topper 2018-09-25 23:28:27 +00:00
parent d8c68840c8
commit 12c18840fa
4 changed files with 36 additions and 76 deletions

View File

@ -31875,23 +31875,19 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast,
// For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
// (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
MVT SExtVT;
MVT FPCastVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
switch (VecVT.getSimpleVT().SimpleTy) {
default:
return SDValue();
case MVT::v2i1:
SExtVT = MVT::v2i64;
FPCastVT = MVT::v2f64;
break;
case MVT::v4i1:
SExtVT = MVT::v4i32;
FPCastVT = MVT::v4f32;
// For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
// sign-extend to a 256-bit operation to avoid truncation.
if (N0->getOpcode() == ISD::SETCC && Subtarget.hasAVX() &&
N0->getOperand(0).getValueType().is256BitVector()) {
SExtVT = MVT::v4i64;
FPCastVT = MVT::v4f64;
}
break;
case MVT::v8i1:
@ -31905,7 +31901,6 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast,
(N0->getOperand(0).getValueType().is256BitVector() ||
N0->getOperand(0).getValueType().is512BitVector())) {
SExtVT = MVT::v8i32;
FPCastVT = MVT::v8f32;
}
break;
case MVT::v16i1:
@ -31921,23 +31916,16 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast,
};
SDLoc DL(BitCast);
SDValue V = DAG.getSExtOrTrunc(N0, DL, SExtVT);
SDValue V = DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, N0);
if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8) {
V = getPMOVMSKB(DL, V, DAG, Subtarget);
return DAG.getZExtOrTrunc(V, DL, VT);
} else {
if (SExtVT == MVT::v8i16)
V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
DAG.getUNDEF(MVT::v8i16));
V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
}
if (SExtVT == MVT::v8i16) {
assert(16 == DAG.ComputeNumSignBits(V) && "Expected all/none bit vector");
V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
DAG.getUNDEF(MVT::v8i16));
} else
assert(SExtVT.getScalarType() != MVT::i16 &&
"Vectors of i16 must be packed");
if (FPCastVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
V = DAG.getBitcast(FPCastVT, V);
V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
return DAG.getZExtOrTrunc(V, DL, VT);
}

View File

@ -2239,6 +2239,16 @@ let Predicates = [HasAVX] in {
SSEPackedSingle>, PS, VEX, VEX_L, VEX_WIG;
defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, v4f64, "movmskpd",
SSEPackedDouble>, PD, VEX, VEX_L, VEX_WIG;
// Also support integer VTs to avoid an int->fp bitcast in the DAG.
def : Pat<(X86movmsk (v4i32 VR128:$src)),
(VMOVMSKPSrr VR128:$src)>;
def : Pat<(X86movmsk (v2i64 VR128:$src)),
(VMOVMSKPDrr VR128:$src)>;
def : Pat<(X86movmsk (v8i32 VR256:$src)),
(VMOVMSKPSYrr VR256:$src)>;
def : Pat<(X86movmsk (v4i64 VR256:$src)),
(VMOVMSKPDYrr VR256:$src)>;
}
defm MOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
@ -2246,6 +2256,14 @@ defm MOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
defm MOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
SSEPackedDouble>, PD;
let Predicates = [UseSSE2] in {
// Also support integer VTs to avoid an int->fp bitcast in the DAG.
def : Pat<(X86movmsk (v4i32 VR128:$src)),
(MOVMSKPSrr VR128:$src)>;
def : Pat<(X86movmsk (v2i64 VR128:$src)),
(MOVMSKPDrr VR128:$src)>;
}
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

View File

@ -29,14 +29,12 @@ define i32 @PR15215_bad(<4 x i32> %input) {
; X32-SSE2-LABEL: PR15215_bad:
; X32-SSE2: # %bb.0: # %entry
; X32-SSE2-NEXT: pslld $31, %xmm0
; X32-SSE2-NEXT: psrad $31, %xmm0
; X32-SSE2-NEXT: movmskps %xmm0, %eax
; X32-SSE2-NEXT: retl
;
; X32-AVX2-LABEL: PR15215_bad:
; X32-AVX2: # %bb.0: # %entry
; X32-AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; X32-AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; X32-AVX2-NEXT: vmovmskps %xmm0, %eax
; X32-AVX2-NEXT: retl
;
@ -58,14 +56,12 @@ define i32 @PR15215_bad(<4 x i32> %input) {
; X64-SSE2-LABEL: PR15215_bad:
; X64-SSE2: # %bb.0: # %entry
; X64-SSE2-NEXT: pslld $31, %xmm0
; X64-SSE2-NEXT: psrad $31, %xmm0
; X64-SSE2-NEXT: movmskps %xmm0, %eax
; X64-SSE2-NEXT: retq
;
; X64-AVX2-LABEL: PR15215_bad:
; X64-AVX2: # %bb.0: # %entry
; X64-AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovmskps %xmm0, %eax
; X64-AVX2-NEXT: retq
entry:

View File

@ -3791,23 +3791,11 @@ define i1 @allzeros_v8i64_and4(<8 x i64> %arg) {
define i32 @movmskpd(<2 x double> %x) {
; SSE2-LABEL: movmskpd:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: movmskpd %xmm1, %eax
; SSE2-NEXT: movmskpd %xmm0, %eax
; SSE2-NEXT: retq
;
; AVX-LABEL: movmskpd:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovmskpd %xmm0, %eax
; AVX-NEXT: retq
;
@ -3827,15 +3815,11 @@ define i32 @movmskpd(<2 x double> %x) {
define i32 @movmskps(<4 x float> %x) {
; SSE2-LABEL: movmskps:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: movmskps %xmm1, %eax
; SSE2-NEXT: movmskps %xmm0, %eax
; SSE2-NEXT: retq
;
; AVX-LABEL: movmskps:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: retq
;
@ -3878,24 +3862,11 @@ define i32 @movmskpd256(<4 x double> %x) {
; SSE2-NEXT: movmskps %xmm1, %eax
; SSE2-NEXT: retq
;
; AVX1-LABEL: movmskpd256:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskpd %ymm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: movmskpd256:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
; AVX-LABEL: movmskpd256:
; AVX: # %bb.0:
; AVX-NEXT: vmovmskpd %ymm0, %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; SKX-LABEL: movmskpd256:
; SKX: # %bb.0:
@ -3924,24 +3895,11 @@ define i32 @movmskps256(<8 x float> %x) {
; SSE2-NEXT: movzbl %al, %eax
; SSE2-NEXT: retq
;
; AVX1-LABEL: movmskps256:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: movmskps256:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
; AVX-LABEL: movmskps256:
; AVX: # %bb.0:
; AVX-NEXT: vmovmskps %ymm0, %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; SKX-LABEL: movmskps256:
; SKX: # %bb.0: