[X86] Add support for turning vXi1 shuffles into KSHIFTL/KSHIFTR.

This patch recognizes shuffles that shift elements and fill with zeros. I've copied and modified the shift matching code we use for normal vector registers to do this. I'm not sure if there's a good way to share more of this code without making the existing function more complex than it already is.

This will be used to enable kshift intrinsics in clang.

Differential Revision: https://reviews.llvm.org/D51401

llvm-svn: 341227
This commit is contained in:
Craig Topper 2018-08-31 17:17:21 +00:00
parent 5b7548c653
commit b7bb9f0078
3 changed files with 118 additions and 210 deletions

View File

@ -15053,6 +15053,39 @@ static SDValue lower512BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
}
}
// Determine if this shuffle can be implemented with a KSHIFT instruction.
// Returns the shift amount if possible or -1 if not. This is a simplified
// version of matchVectorShuffleAsShift.
//
// \p Opcode is set to X86ISD::KSHIFTL or X86ISD::KSHIFTR on a successful
// match. \p MaskOffset is the element-index base of the source operand being
// matched (the caller passes 0 for V1 and NumElts for V2). \p Zeroable marks
// the mask lanes that are known to produce zero.
static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
int MaskOffset, const APInt &Zeroable) {
int Size = Mask.size();
// The lanes vacated by the shift must be allowed to be zero: the low
// 'Shift' lanes for a left shift, the high 'Shift' lanes for a right shift.
auto CheckZeros = [&](int Shift, bool Left) {
for (int j = 0; j < Shift; ++j)
if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
return false;
return true;
};
// The surviving Size-Shift lanes must be a sequential run of source
// elements starting at Low + MaskOffset (undef lanes are tolerated).
auto MatchShift = [&](int Shift, bool Left) {
unsigned Pos = Left ? Shift : 0;
unsigned Low = Left ? 0 : Shift;
unsigned Len = Size - Shift;
return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
};
// Try all shift amounts in both directions; return the first amount whose
// zero-fill and sequential-run constraints both hold.
for (int Shift = 1; Shift != Size; ++Shift)
for (bool Left : {true, false})
if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
return Shift;
}
// No shift of any amount in either direction matches this shuffle.
return -1;
}
// Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the masks.
// The only way to shuffle bits is to sign-extend the mask vector to SIMD
@ -15062,6 +15095,9 @@ static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/o basic ISA!");
unsigned NumElts = Mask.size();
// Try to recognize shuffles that are just padding a subvector with zeros.
@ -15088,9 +15124,21 @@ static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
Extract, DAG.getIntPtrConstant(0, DL));
}
// Try to match KSHIFTs.
// TODO: Support narrower than legal shifts by widening and extracting.
if (NumElts >= 16 || (Subtarget.hasDQI() && NumElts == 8)) {
unsigned Offset = 0;
for (SDValue V : { V1, V2 }) {
unsigned Opcode;
int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
if (ShiftAmt >= 0)
return DAG.getNode(Opcode, DL, VT, V,
DAG.getConstant(ShiftAmt, DL, MVT::i8));
Offset += NumElts; // Increment for next iteration.
}
}
assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/o basic ISA!");
MVT ExtVT;
switch (VT.SimpleTy) {
default:

View File

@ -30,11 +30,8 @@ define <8 x i1> @test2(<2 x i1> %a) {
; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
; CHECK-NEXT: vpmovq2m %xmm0, %k0
; CHECK-NEXT: vpmovm2d %k0, %ymm0
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; CHECK-NEXT: vpmovd2m %ymm0, %k0
; CHECK-NEXT: kshiftlb $4, %k0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%res = shufflevector <2 x i1> %a, <2 x i1> zeroinitializer, <8 x i32> <i32 3, i32 3, i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef>
ret <8 x i1> %res

View File

@ -20,11 +20,7 @@ define i8 @kshiftl_v8i1_1(<8 x i64> %x, <8 x i64> %y) {
; SKX-LABEL: kshiftl_v8i1_1:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: movb $-2, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: vpmovd2m %ymm0, %k1
; SKX-NEXT: kshiftlb $1, %k0, %k1
; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $al killed $al killed $eax
@ -41,12 +37,8 @@ define i8 @kshiftl_v8i1_1(<8 x i64> %x, <8 x i64> %y) {
define i16 @kshiftl_v16i1_1(<16 x i32> %x, <16 x i32> %y) {
; KNL-LABEL: kshiftl_v16i1_1:
; KNL: # %bb.0:
; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: movw $-2, %ax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $1, %k0, %k1
; KNL-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1}
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: # kill: def $ax killed $ax killed $eax
@ -56,11 +48,7 @@ define i16 @kshiftl_v16i1_1(<16 x i32> %x, <16 x i32> %y) {
; SKX-LABEL: kshiftl_v16i1_1:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmd %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: movw $-2, %ax
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: vpmovd2m %zmm0, %k1
; SKX-NEXT: kshiftlw $1, %k0, %k1
; SKX-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $ax killed $ax killed $eax
@ -86,12 +74,9 @@ define i32 @kshiftl_v32i1_1(<32 x i16> %x, <32 x i16> %y) {
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: valignd {{.*#+}} zmm1 = zmm0[15],zmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL-NEXT: movw $-2, %ax
; KNL-NEXT: kmovw %eax, %k2
; KNL-NEXT: vpexpandd %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2
; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm0[15],zmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: kshiftlw $1, %k2, %k2
; KNL-NEXT: vpcmpeqw %ymm4, %ymm3, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpcmpeqw %ymm4, %ymm2, %ymm1
@ -108,11 +93,7 @@ define i32 @kshiftl_v32i1_1(<32 x i16> %x, <32 x i16> %y) {
; SKX-LABEL: kshiftl_v32i1_1:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmw %zmm0, %zmm0, %k0
; SKX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX-NEXT: vpmovm2w %k0, %zmm2
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [32,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; SKX-NEXT: vpermi2w %zmm0, %zmm2, %zmm3
; SKX-NEXT: vpmovw2m %zmm3, %k1
; SKX-NEXT: kshiftld $1, %k0, %k1
; SKX-NEXT: vptestnmw %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: vzeroupper
@ -149,12 +130,9 @@ define i64 @kshiftl_v64i1_1(<64 x i8> %x, <64 x i8> %y) {
; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm1[15],zmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k3} {z}
; KNL-NEXT: valignd {{.*#+}} zmm1 = zmm0[15],zmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k3
; KNL-NEXT: movw $-2, %ax
; KNL-NEXT: kmovw %eax, %k4
; KNL-NEXT: vpexpandd %zmm0, %zmm0 {%k4} {z}
; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm0[15],zmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k4
; KNL-NEXT: kshiftlw $1, %k3, %k3
; KNL-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
@ -163,9 +141,9 @@ define i64 @kshiftl_v64i1_1(<64 x i8> %x, <64 x i8> %y) {
; KNL-NEXT: vextracti128 $1, %ymm2, %xmm3
; KNL-NEXT: vpmovsxbd %xmm3, %zmm3
; KNL-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k0 {%k4}
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k0 {%k3}
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vptestmd %zmm3, %zmm3, %k0 {%k3}
; KNL-NEXT: vptestmd %zmm3, %zmm3, %k0 {%k4}
; KNL-NEXT: kmovw %k0, %ecx
; KNL-NEXT: shll $16, %ecx
; KNL-NEXT: orl %eax, %ecx
@ -183,17 +161,7 @@ define i64 @kshiftl_v64i1_1(<64 x i8> %x, <64 x i8> %y) {
; SKX-LABEL: kshiftl_v64i1_1:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmb %zmm0, %zmm0, %k0
; SKX-NEXT: movl $1, %eax
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: knotd %k1, %k1
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; SKX-NEXT: vpalignr {{.*#+}} ymm2 {%k1} {z} = ymm2[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm2[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; SKX-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[0,1]
; SKX-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[15],ymm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm0[31],ymm3[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; SKX-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; SKX-NEXT: vpmovb2m %zmm0, %k1
; SKX-NEXT: kshiftlq $1, %k0, %k1
; SKX-NEXT: vptestnmb %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovq %k0, %rax
; SKX-NEXT: vzeroupper
@ -224,11 +192,7 @@ define i8 @kshiftl_v8i1_7(<8 x i64> %x, <8 x i64> %y) {
; SKX-LABEL: kshiftl_v8i1_7:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: movb $-128, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: vpmovd2m %ymm0, %k1
; SKX-NEXT: kshiftlb $7, %k0, %k1
; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $al killed $al killed $eax
@ -245,12 +209,8 @@ define i8 @kshiftl_v8i1_7(<8 x i64> %x, <8 x i64> %y) {
define i16 @kshiftl_v16i1_15(<16 x i32> %x, <16 x i32> %y) {
; KNL-LABEL: kshiftl_v16i1_15:
; KNL: # %bb.0:
; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: movw $-32768, %ax # imm = 0x8000
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1}
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: # kill: def $ax killed $ax killed $eax
@ -260,11 +220,7 @@ define i16 @kshiftl_v16i1_15(<16 x i32> %x, <16 x i32> %y) {
; SKX-LABEL: kshiftl_v16i1_15:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmd %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: movw $-32768, %ax # imm = 0x8000
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: vpmovd2m %zmm0, %k1
; SKX-NEXT: kshiftlw $15, %k0, %k1
; SKX-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $ax killed $ax killed $eax
@ -284,12 +240,8 @@ define i32 @kshiftl_v32i1_31(<32 x i16> %x, <32 x i16> %y) {
; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; KNL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: movw $-32768, %ax # imm = 0x8000
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: vpcmpeqw %ymm1, %ymm3, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
@ -301,11 +253,7 @@ define i32 @kshiftl_v32i1_31(<32 x i16> %x, <32 x i16> %y) {
; SKX-LABEL: kshiftl_v32i1_31:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmw %zmm0, %zmm0, %k0
; SKX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX-NEXT: vpmovm2w %k0, %zmm2
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,32]
; SKX-NEXT: vpermi2w %zmm2, %zmm0, %zmm3
; SKX-NEXT: vpmovw2m %zmm3, %k1
; SKX-NEXT: kshiftld $31, %k0, %k1
; SKX-NEXT: vptestnmw %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: vzeroupper
@ -324,12 +272,8 @@ define i64 @kshiftl_v64i1_63(<64 x i8> %x, <64 x i8> %y) {
; KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; KNL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: movw $-32768, %ax # imm = 0x8000
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: vpcmpeqb %ymm1, %ymm3, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
@ -343,15 +287,7 @@ define i64 @kshiftl_v64i1_63(<64 x i8> %x, <64 x i8> %y) {
; SKX-LABEL: kshiftl_v64i1_63:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmb %zmm0, %zmm0, %k0
; SKX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX-NEXT: vpmovm2b %k0, %zmm2
; SKX-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
; SKX-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; SKX-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu8 %ymm2, %ymm2 {%k1} {z}
; SKX-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; SKX-NEXT: vpmovb2m %zmm0, %k1
; SKX-NEXT: kshiftlq $63, %k0, %k1
; SKX-NEXT: vptestnmb %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovq %k0, %rax
; SKX-NEXT: vzeroupper
@ -382,11 +318,7 @@ define i8 @kshiftr_v8i1_1(<8 x i64> %x, <8 x i64> %y) {
; SKX-LABEL: kshiftr_v8i1_1:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: valignd {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,0]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
; SKX-NEXT: vpmovd2m %ymm0, %k1
; SKX-NEXT: kshiftrb $1, %k0, %k1
; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $al killed $al killed $eax
@ -403,12 +335,8 @@ define i8 @kshiftr_v8i1_1(<8 x i64> %x, <8 x i64> %y) {
define i16 @kshiftr_v16i1_1(<16 x i32> %x, <16 x i32> %y) {
; KNL-LABEL: kshiftr_v16i1_1:
; KNL: # %bb.0:
; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,31]
; KNL-NEXT: vpermi2d %zmm0, %zmm2, %zmm3
; KNL-NEXT: vptestmd %zmm3, %zmm3, %k1
; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftrw $1, %k0, %k1
; KNL-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1}
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: # kill: def $ax killed $ax killed $eax
@ -418,11 +346,7 @@ define i16 @kshiftr_v16i1_1(<16 x i32> %x, <16 x i32> %y) {
; SKX-LABEL: kshiftr_v16i1_1:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmd %zmm0, %zmm0, %k0
; SKX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX-NEXT: vpmovm2d %k0, %zmm2
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,31]
; SKX-NEXT: vpermi2d %zmm0, %zmm2, %zmm3
; SKX-NEXT: vpmovd2m %zmm3, %k1
; SKX-NEXT: kshiftrw $1, %k0, %k1
; SKX-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $ax killed $ax killed $eax
@ -449,20 +373,17 @@ define i32 @kshiftr_v32i1_1(<32 x i16> %x, <32 x i16> %y) {
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0]
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm0 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,31]
; KNL-NEXT: vpxor %xmm5, %xmm5, %xmm5
; KNL-NEXT: vpermi2d %zmm5, %zmm1, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2
; KNL-NEXT: vpcmpeqw %ymm4, %ymm3, %ymm0
; KNL-NEXT: kshiftrw $1, %k1, %k1
; KNL-NEXT: vpcmpeqw %ymm4, %ymm2, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpcmpeqw %ymm4, %ymm2, %ymm1
; KNL-NEXT: vpcmpeqw %ymm4, %ymm3, %ymm1
; KNL-NEXT: vpmovsxwd %ymm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k1}
; KNL-NEXT: kmovw %k0, %ecx
; KNL-NEXT: shll $16, %ecx
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k2}
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: shll $16, %eax
; KNL-NEXT: orl %ecx, %eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
@ -470,11 +391,7 @@ define i32 @kshiftr_v32i1_1(<32 x i16> %x, <32 x i16> %y) {
; SKX-LABEL: kshiftr_v32i1_1:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmw %zmm0, %zmm0, %k0
; SKX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX-NEXT: vpmovm2w %k0, %zmm2
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,63]
; SKX-NEXT: vpermi2w %zmm0, %zmm2, %zmm3
; SKX-NEXT: vpmovw2m %zmm3, %k1
; SKX-NEXT: kshiftrd $1, %k0, %k1
; SKX-NEXT: vptestnmw %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: vzeroupper
@ -491,53 +408,50 @@ define i64 @kshiftr_v64i1_1(<64 x i8> %x, <64 x i8> %y) {
; KNL-LABEL: kshiftr_v64i1_1:
; KNL: # %bb.0:
; KNL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; KNL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm1
; KNL-NEXT: vextracti128 $1, %ymm1, %xmm5
; KNL-NEXT: vpmovsxbd %xmm5, %zmm5
; KNL-NEXT: vptestmd %zmm5, %zmm5, %k3
; KNL-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm5
; KNL-NEXT: vptestmd %zmm5, %zmm5, %k2
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k3
; KNL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k4
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k4} {z}
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: valignd {{.*#+}} zmm5 = zmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm1[0]
; KNL-NEXT: vptestmd %zmm5, %zmm5, %k1
; KNL-NEXT: vpternlogd $255, %zmm5, %zmm5, %zmm5 {%k3} {z}
; KNL-NEXT: vpternlogd $255, %zmm5, %zmm5, %zmm5 {%k2} {z}
; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm5[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm0[0]
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k3
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm5[0]
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k4
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm0 = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,31]
; KNL-NEXT: vpxor %xmm5, %xmm5, %xmm5
; KNL-NEXT: vpermi2d %zmm5, %zmm1, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k2
; KNL-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm0
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k3} {z}
; KNL-NEXT: valignd {{.*#+}} zmm0 = zmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zmm0[0]
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k4
; KNL-NEXT: kshiftrw $1, %k3, %k3
; KNL-NEXT: vpcmpeqb %ymm4, %ymm2, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpcmpeqb %ymm4, %ymm2, %ymm2
; KNL-NEXT: vextracti128 $1, %ymm2, %xmm3
; KNL-NEXT: vpmovsxbd %xmm3, %zmm3
; KNL-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm2
; KNL-NEXT: vpmovsxbd %xmm2, %zmm3
; KNL-NEXT: vextracti128 $1, %ymm2, %xmm2
; KNL-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k0 {%k4}
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k0 {%k3}
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vptestmd %zmm3, %zmm3, %k0 {%k3}
; KNL-NEXT: shll $16, %eax
; KNL-NEXT: vptestmd %zmm3, %zmm3, %k0 {%k4}
; KNL-NEXT: kmovw %k0, %ecx
; KNL-NEXT: shll $16, %ecx
; KNL-NEXT: orl %eax, %ecx
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; KNL-NEXT: shlq $32, %rcx
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k2}
; KNL-NEXT: kmovw %k0, %edx
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k2}
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k1}
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: shll $16, %eax
; KNL-NEXT: orl %edx, %eax
; KNL-NEXT: shlq $32, %rax
; KNL-NEXT: orq %rcx, %rax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
@ -545,17 +459,7 @@ define i64 @kshiftr_v64i1_1(<64 x i8> %x, <64 x i8> %y) {
; SKX-LABEL: kshiftr_v64i1_1:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmb %zmm0, %zmm0, %k0
; SKX-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: knotd %k1, %k1
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; SKX-NEXT: vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
; SKX-NEXT: vpalignr {{.*#+}} ymm3 {%k1} {z} = ymm2[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm3[0],ymm2[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm3[16]
; SKX-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm2[0,1]
; SKX-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm2[0],ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm2[16]
; SKX-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
; SKX-NEXT: vpmovb2m %zmm0, %k1
; SKX-NEXT: kshiftrq $1, %k0, %k1
; SKX-NEXT: vptestnmb %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovq %k0, %rax
; SKX-NEXT: vzeroupper
@ -586,11 +490,7 @@ define i8 @kshiftr_v8i1_7(<8 x i64> %x, <8 x i64> %y) {
; SKX-LABEL: kshiftr_v8i1_7:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: movb $-2, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: vpmovd2m %ymm0, %k1
; SKX-NEXT: kshiftlb $1, %k0, %k1
; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $al killed $al killed $eax
@ -607,12 +507,8 @@ define i8 @kshiftr_v8i1_7(<8 x i64> %x, <8 x i64> %y) {
define i16 @kshiftr_v16i1_15(<16 x i32> %x, <16 x i32> %y) {
; KNL-LABEL: kshiftr_v16i1_15:
; KNL: # %bb.0:
; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [31,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; KNL-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
; KNL-NEXT: vptestmd %zmm3, %zmm3, %k1
; KNL-NEXT: vptestnmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k1
; KNL-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1}
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: # kill: def $ax killed $ax killed $eax
@ -622,11 +518,7 @@ define i16 @kshiftr_v16i1_15(<16 x i32> %x, <16 x i32> %y) {
; SKX-LABEL: kshiftr_v16i1_15:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmd %zmm0, %zmm0, %k0
; SKX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX-NEXT: vpmovm2d %k0, %zmm2
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [31,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; SKX-NEXT: vpermi2d %zmm2, %zmm0, %zmm3
; SKX-NEXT: vpmovd2m %zmm3, %k1
; SKX-NEXT: kshiftrw $15, %k0, %k1
; SKX-NEXT: vptestnmd %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $ax killed $ax killed $eax
@ -646,12 +538,8 @@ define i32 @kshiftr_v32i1_31(<32 x i16> %x, <32 x i16> %y) {
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vpcmpeqw %ymm0, %ymm1, %ymm1
; KNL-NEXT: vpmovsxwd %ymm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [31,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; KNL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; KNL-NEXT: vpermt2d %zmm1, %zmm3, %zmm4
; KNL-NEXT: vptestmd %zmm4, %zmm4, %k1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k1
; KNL-NEXT: vpcmpeqw %ymm0, %ymm2, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
@ -662,11 +550,7 @@ define i32 @kshiftr_v32i1_31(<32 x i16> %x, <32 x i16> %y) {
; SKX-LABEL: kshiftr_v32i1_31:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmw %zmm0, %zmm0, %k0
; SKX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX-NEXT: vpmovm2w %k0, %zmm2
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm3 = [63,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; SKX-NEXT: vpermi2w %zmm2, %zmm0, %zmm3
; SKX-NEXT: vpmovw2m %zmm3, %k1
; SKX-NEXT: kshiftrd $31, %k0, %k1
; SKX-NEXT: vptestnmw %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: vzeroupper
@ -686,12 +570,8 @@ define i64 @kshiftr_v64i1_63(<64 x i8> %x, <64 x i8> %y) {
; KNL-NEXT: vpcmpeqb %ymm0, %ymm1, %ymm1
; KNL-NEXT: vextracti128 $1, %ymm1, %xmm1
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [31,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; KNL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; KNL-NEXT: vpermt2d %zmm1, %zmm3, %zmm4
; KNL-NEXT: vptestmd %zmm4, %zmm4, %k1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k1
; KNL-NEXT: vpcmpeqb %ymm0, %ymm2, %ymm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
@ -703,14 +583,7 @@ define i64 @kshiftr_v64i1_63(<64 x i8> %x, <64 x i8> %y) {
; SKX-LABEL: kshiftr_v64i1_63:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmb %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
; SKX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SKX-NEXT: movl $1, %eax
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vmovdqu8 %ymm0, %ymm0 {%k1} {z}
; SKX-NEXT: vpmovb2m %zmm0, %k1
; SKX-NEXT: kshiftrq $63, %k0, %k1
; SKX-NEXT: vptestnmb %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovq %k0, %rax
; SKX-NEXT: vzeroupper
@ -742,11 +615,7 @@ define i8 @kshiftl_v8i1_zu123u56(<8 x i64> %x, <8 x i64> %y) {
; SKX-LABEL: kshiftl_v8i1_zu123u56:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: valignd {{.*#+}} ymm0 = ymm0[7,0,1,2,3,4,5,6]
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
; SKX-NEXT: vpmovd2m %ymm0, %k1
; SKX-NEXT: kshiftlb $1, %k0, %k1
; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $al killed $al killed $eax
@ -777,9 +646,7 @@ define i8 @kshiftl_v8i1_u0123456(<8 x i64> %x, <8 x i64> %y) {
; SKX-LABEL: kshiftl_v8i1_u0123456:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: valignd {{.*#+}} ymm0 = ymm0[7,0,1,2,3,4,5,6]
; SKX-NEXT: vpmovd2m %ymm0, %k1
; SKX-NEXT: kshiftlb $1, %k0, %k1
; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $al killed $al killed $eax
@ -812,9 +679,7 @@ define i8 @kshiftr_v8i1_1u3u567z(<8 x i64> %x, <8 x i64> %y) {
; SKX-LABEL: kshiftr_v8i1_1u3u567z:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,ymm0[20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero
; SKX-NEXT: vpmovd2m %ymm0, %k1
; SKX-NEXT: kshiftrb $1, %k0, %k1
; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $al killed $al killed $eax
@ -845,9 +710,7 @@ define i8 @kshiftr_v8i1_234567uu(<8 x i64> %x, <8 x i64> %y) {
; SKX-LABEL: kshiftr_v8i1_234567uu:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmq %zmm0, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,3]
; SKX-NEXT: vpmovd2m %ymm0, %k1
; SKX-NEXT: kshiftrb $2, %k0, %k1
; SKX-NEXT: vptestnmq %zmm1, %zmm1, %k0 {%k1}
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: # kill: def $al killed $al killed $eax