[X86][SSE] Add support for extending bool vectors bitcasted from scalars

This patch acts as the reverse of combineBitcastvxi1: it matches a scalar integer bitcast to a boolean vector and extends it 'in place' to the requested legal type.

Currently this doesn't handle AVX512 at all, but the existing mask register approach is lacking for some cases.
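
For example, given IR such as the following (a minimal sketch of the kind of
pattern the new combine matches; the function name is illustrative rather than
taken from this commit's tests):

define <8 x i16> @ext_i8_8i16(i8 %a0) {
  %1 = bitcast i8 %a0 to <8 x i1>
  %2 = sext <8 x i1> %1 to <8 x i16>
  ret <8 x i16> %2
}

Instead of spilling the scalar to the stack and extracting each bit in turn,
we can broadcast the i8 to every lane, AND each lane with its power-of-2 mask
<1,2,4,8,16,32,64,128>, and compare the result for equality against that mask,
as the updated test checks below show.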

Differential Revision: https://reviews.llvm.org/D35320

llvm-svn: 314076
Simon Pilgrim 2017-09-24 13:42:31 +00:00
parent ee457594c2
commit a705db9a9e
4 changed files with 1212 additions and 6582 deletions


@@ -17274,6 +17274,24 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
                       DAG.getConstant(CmpMode, dl, MVT::i8));
  }

  // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
  // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
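  // For example, with Y == <1,2,4,8>, each lane of (X & Y) is either zero or
  // that lane's power-of-2 value, so the per-lane test (X & Y) != 0 is
  // exactly (X & Y) == Y, which maps onto PCMPEQ without a following invert.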
  if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
    SDValue BC0 = peekThroughBitcasts(Op0);
    if (BC0.getOpcode() == ISD::AND) {
      APInt UndefElts;
      SmallVector<APInt, 64> EltBits;
      if (getTargetConstantBitsFromNode(BC0.getOperand(1),
                                        VT.getScalarSizeInBits(), UndefElts,
                                        EltBits, false, false)) {
        if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
          Cond = ISD::SETEQ;
          Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
        }
      }
    }
  }

  // We are handling one of the integer comparisons here. Since SSE only has
  // GT and EQ comparisons for integer, swapping operands and multiple
  // operations may be required for some comparisons.
@@ -34480,6 +34498,95 @@ static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
                     CMovN.getOperand(2), CMovN.getOperand(3));
}
// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
// This is more or less the reverse of combineBitcastvxi1.
static SDValue
combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
      Opcode != ISD::ANY_EXTEND)
    return SDValue();
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  EVT InSVT = N0.getValueType().getScalarType();
  unsigned EltSizeInBits = SVT.getSizeInBits();

  // We must be extending a bool vector (bit-cast from a scalar integer) to a
  // legal integer vector type.
  if (!VT.isVector())
    return SDValue();
  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
    return SDValue();
  if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  EVT SclVT = N0.getOperand(0).getValueType();
  if (!SclVT.isScalarInteger())
    return SDValue();

  SDLoc DL(N);
  SDValue Vec;
  SmallVector<int, 32> ShuffleMask;
  unsigned NumElts = VT.getVectorNumElements();
  assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");

  // Broadcast the scalar integer to the vector elements.
  if (NumElts > EltSizeInBits) {
    // If the scalar integer is greater than the vector element size, then we
    // must split it down into sub-sections for broadcasting. For example:
    //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
    //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
    assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
    unsigned Scale = NumElts / EltSizeInBits;
    EVT BroadcastVT =
        EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
    Vec = DAG.getBitcast(VT, Vec);
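    // For example, i16 -> v16i8 uses Scale = 2 and the shuffle mask
    // <0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1>, so byte 0 of the scalar feeds
    // vector elements 0-7 and byte 1 feeds elements 8-15.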
    for (unsigned i = 0; i != Scale; ++i)
      ShuffleMask.append(EltSizeInBits, i);
  } else {
    // For smaller scalar integers, we can simply any-extend it to the vector
    // element size (we don't care about the upper bits) and broadcast it to
    // all elements.
    SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
    ShuffleMask.append(NumElts, 0);
  }
  Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);

  // Now, mask the relevant bit in each element.
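  // For example, a v8i16 result uses BitMask = <1,2,4,8,16,32,64,128>, one
  // power-of-2 per element, selecting bit (i % EltSizeInBits) of element i.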
  SmallVector<SDValue, 32> Bits;
  for (unsigned i = 0; i != NumElts; ++i) {
    unsigned BitIdx = (i % EltSizeInBits);
    APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
    Bits.push_back(DAG.getConstant(Bit, DL, SVT));
  }
  SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
  Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);

  // Compare against the bitmask and extend the result.
  EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
  Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
  Vec = DAG.getSExtOrTrunc(Vec, DL, VT);

  // For SEXT this is now done; otherwise shift the result down for
  // zero-extension.
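  // (The SETEQ+SEXT above produced all-ones lanes for set bits, so a logical
  // shift right by EltSizeInBits - 1 reduces each lane to 0 or 1.)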
  if (Opcode == ISD::SIGN_EXTEND)
    return Vec;
  return DAG.getNode(ISD::SRL, DL, VT, Vec,
                     DAG.getConstant(EltSizeInBits - 1, DL, VT));
}

/// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
/// ZERO_EXTEND_VECTOR_INREG, this requires the splitting (or concatenating
/// with UNDEFs) of the input to vectors of the same size as the target type
@@ -34619,6 +34726,9 @@ static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
  if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (Subtarget.hasAVX() && VT.is256BitVector())
    if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
      return R;
@@ -34755,6 +34865,9 @@ static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
  if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (VT.is256BitVector())
    if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
      return R;

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -8,29 +8,38 @@
define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i2_2i1:
; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
; SSE2-SSSE3-NEXT: shrl %eax
; SSE2-SSSE3-NEXT: andl $1, %eax
; SSE2-SSSE3-NEXT: movq %rax, %xmm1
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: bitcast_i2_2i1:
; AVX12: # BB#0:
; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vmovq %rcx, %xmm0
; AVX12-NEXT: shrl %eax
; AVX12-NEXT: andl $1, %eax
; AVX12-NEXT: vmovq %rax, %xmm1
; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX12-NEXT: retq
; AVX1-LABEL: bitcast_i2_2i1:
; AVX1: # BB#0:
; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i2_2i1:
; AVX2: # BB#0:
; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlq $63, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i2_2i1:
; AVX512: # BB#0:
@@ -48,54 +57,32 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i4_4i1:
; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $3, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $2, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-SSSE3-NEXT: movd %eax, %xmm0
; SSE2-SSSE3-NEXT: shrl %eax
; SSE2-SSSE3-NEXT: movd %eax, %xmm2
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: psrld $31, %xmm0
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: bitcast_i4_4i1:
; AVX1: # BB#0:
; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl %ecx
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $2, %ecx
; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
; AVX1-NEXT: shrl $3, %eax
; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i4_4i1:
; AVX2: # BB#0:
; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl %ecx
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $2, %ecx
; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
; AVX2-NEXT: shrl $3, %eax
; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i4_4i1:
@@ -115,82 +102,35 @@ define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i8_8i1:
; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $3, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $2, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $5, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $4, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $6, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
; SSE2-SSSE3-NEXT: shrl $7, %eax
; SSE2-SSSE3-NEXT: movzwl %ax, %eax
; SSE2-SSSE3-NEXT: movd %eax, %xmm3
; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-SSSE3-NEXT: movd %edi, %xmm0
; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: bitcast_i8_8i1:
; AVX12: # BB#0:
; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: movl %eax, %edx
; AVX12-NEXT: andl $1, %edx
; AVX12-NEXT: vmovd %edx, %xmm0
; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $2, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $3, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $4, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $5, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $6, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
; AVX12-NEXT: shrl $7, %eax
; AVX12-NEXT: movzwl %ax, %eax
; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; AVX12-NEXT: retq
; AVX1-LABEL: bitcast_i8_8i1:
; AVX1: # BB#0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i8_8i1:
; AVX2: # BB#0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i8_8i1:
; AVX512: # BB#0:
@@ -202,156 +142,51 @@ define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
}
define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i16_16i1:
; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $7, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $6, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $5, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $4, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $3, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $2, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $11, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $10, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $9, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $8, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $13, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $12, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
; SSE2-SSSE3-NEXT: movl %eax, %ecx
; SSE2-SSSE3-NEXT: shrl $14, %ecx
; SSE2-SSSE3-NEXT: andl $1, %ecx
; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
; SSE2-SSSE3-NEXT: shrl $15, %eax
; SSE2-SSSE3-NEXT: movzwl %ax, %eax
; SSE2-SSSE3-NEXT: movd %eax, %xmm4
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-SSSE3-NEXT: retq
; SSE2-LABEL: bitcast_i16_16i1:
; SSE2: # BB#0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
; SSE2-NEXT: psrlw $7, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; AVX12-LABEL: bitcast_i16_16i1:
; AVX12: # BB#0:
; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp)
; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: movl %eax, %edx
; AVX12-NEXT: andl $1, %edx
; AVX12-NEXT: vmovd %edx, %xmm0
; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $2, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $3, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $4, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $5, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $6, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $7, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $8, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $9, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $10, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $11, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $12, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $13, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $14, %ecx
; AVX12-NEXT: andl $1, %ecx
; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
; AVX12-NEXT: shrl $15, %eax
; AVX12-NEXT: movzwl %ax, %eax
; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; AVX12-NEXT: retq
; SSSE3-LABEL: bitcast_i16_16i1:
; SSSE3: # BB#0:
; SSSE3-NEXT: movd %edi, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; SSSE3-NEXT: pand %xmm1, %xmm0
; SSSE3-NEXT: pcmpeqb %xmm1, %xmm0
; SSSE3-NEXT: psrlw $7, %xmm0
; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; AVX1-LABEL: bitcast_i16_16i1:
; AVX1: # BB#0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i16_16i1:
; AVX2: # BB#0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i16_16i1:
; AVX512: # BB#0:
@@ -371,286 +206,43 @@ define <32 x i1> @bitcast_i32_32i1(i32 %a0) {
;
; AVX1-LABEL: bitcast_i32_32i1:
; AVX1: # BB#0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: .Lcfi0:
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: .Lcfi1:
; AVX1-NEXT: .cfi_offset %rbp, -16
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: .Lcfi2:
; AVX1-NEXT: .cfi_def_cfa_register %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $32, %rsp
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $17, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: movl %edi, %ecx
; AVX1-NEXT: shrl $16, %ecx
; AVX1-NEXT: andl $1, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $18, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $19, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $20, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $21, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $22, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $23, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $24, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $25, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $26, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $27, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $28, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $29, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $30, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $31, %eax
; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: movl %edi, %ecx
; AVX1-NEXT: andl $1, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm1
; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $2, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $3, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $4, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $5, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $6, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $7, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $8, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $9, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $10, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $11, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $12, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $13, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: shrl $14, %eax
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
; AVX1-NEXT: shrl $15, %edi
; AVX1-NEXT: andl $1, %edi
; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i32_32i1:
; AVX2: # BB#0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: .Lcfi0:
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: .Lcfi1:
; AVX2-NEXT: .cfi_offset %rbp, -16
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: .Lcfi2:
; AVX2-NEXT: .cfi_def_cfa_register %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $32, %rsp
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $17, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: movl %edi, %ecx
; AVX2-NEXT: shrl $16, %ecx
; AVX2-NEXT: andl $1, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm0
; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $18, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $19, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $20, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $21, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $22, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $23, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $24, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $25, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $26, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $27, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $28, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $29, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $30, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $31, %eax
; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: movl %edi, %ecx
; AVX2-NEXT: andl $1, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm1
; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $2, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $3, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $4, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $5, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $6, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $7, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $9, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $10, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $11, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $12, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $13, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: shrl $14, %eax
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
; AVX2-NEXT: shrl $15, %edi
; AVX2-NEXT: andl $1, %edi
; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i32_32i1: