[CodeGenPrepare][x86] shift both sides of a vector select when profitable

This is based on the example/discussion in PR37428:
https://bugs.llvm.org/show_bug.cgi?id=37428

Proper vector shift instructions don't appear until AVX2, so we may generate several
extra instructions within a loop trying to compensate for that. It's difficult to
recover from that shift expansion later than this, so use the existing TLI hook and
splat analysis to enable better codegen.

This extends CGP functionality introduced with:
rL201655

Differential Revision: https://reviews.llvm.org/D63233

llvm-svn: 363511
Sanjay Patel 2019-06-16 15:29:03 +00:00
parent 33b46a6df0
commit c8d88ad1a9
4 changed files with 286 additions and 284 deletions


@@ -32,6 +32,7 @@
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
@@ -363,6 +364,7 @@ class TypePromotionTransaction;
bool optimizeExt(Instruction *&I);
bool optimizeExtUses(Instruction *I);
bool optimizeLoadExt(LoadInst *Load);
bool optimizeShiftInst(BinaryOperator *BO);
bool optimizeSelectInst(SelectInst *SI);
bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
bool optimizeSwitchInst(SwitchInst *SI);
@@ -5917,6 +5919,39 @@ static Value *getTrueOrFalseValue(
return V;
}
bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
  assert(Shift->isShift() && "Expected a shift");

  // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
  // general vector shifts, and (3) the shift amount is a select-of-splatted
  // values, hoist the shifts before the select:
  //   shift Op0, (select Cond, TVal, FVal) -->
  //   select Cond, (shift Op0, TVal), (shift Op0, FVal)
  //
  // This is inverting a generic IR transform when we know that the cost of a
  // general vector shift is more than the cost of 2 shift-by-scalars.
  // We can't do this effectively in SDAG because we may not be able to
  // determine if the select operands are splats from within a basic block.
  Type *Ty = Shift->getType();
  if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
    return false;

  Value *Cond, *TVal, *FVal;
  if (!match(Shift->getOperand(1),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return false;

  if (!isSplatValue(TVal) || !isSplatValue(FVal))
    return false;

  IRBuilder<> Builder(Shift);
  BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
  Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
  Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
  Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
  Shift->replaceAllUsesWith(NewSel);
  Shift->eraseFromParent();
  return true;
}
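As a hedged IR-level illustration of the guards above (editorial sketch, not part of this patch or its tests): the rewrite fires only when the shift amount is a one-use select whose arms are both splats, so the second shift below would be left untouched.

  ; Rewritten: both select arms are splats.
  %splat1 = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
  %splat2 = shufflevector <4 x i32> %y, <4 x i32> undef, <4 x i32> zeroinitializer
  %amt = select <4 x i1> %cond, <4 x i32> %splat1, <4 x i32> %splat2
  %sh = shl <4 x i32> %z, %amt

  ; Not rewritten: the false arm %w is an arbitrary (non-splat) vector.
  %amt2 = select <4 x i1> %cond, <4 x i32> %splat1, <4 x i32> %w
  %sh2 = shl <4 x i32> %z, %amt2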
/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
@@ -6988,13 +7023,13 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
EnableAndCmpSinking && TLI)
return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);
// TODO: Move this into the switch on opcode - it handles shifts already.
if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
BinOp->getOpcode() == Instruction::LShr)) {
ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
if (TLI && CI && TLI->hasExtractBitsInsn())
return OptimizeExtractBits(BinOp, CI, *TLI, *DL);
return false;
if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
return true;
}
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
@@ -7019,6 +7054,10 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
return true;
switch (I->getOpcode()) {
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
return optimizeShiftInst(cast<BinaryOperator>(I));
case Instruction::Call:
return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
case Instruction::Select:


@@ -27,110 +27,93 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
; SSE-NEXT: movl %eax, %edx
; SSE-NEXT: andl $-32, %edx
; SSE-NEXT: movd %r9d, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm0[0,0,0,0]
; SSE-NEXT: movd %r8d, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,0,0,0]
; SSE-NEXT: movd %r8d, %xmm1
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: pxor %xmm10, %xmm10
; SSE-NEXT: movdqa {{.*#+}} xmm11 = [1065353216,1065353216,1065353216,1065353216]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm13 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT: pmovzxdq {{.*#+}} xmm14 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: .p2align 4, 0x90
; SSE-NEXT: .LBB0_4: # %vector.body
; SSE-NEXT: # =>This Inner Loop Header: Depth=1
; SSE-NEXT: pmovzxbw {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE-NEXT: pmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE-NEXT: pmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE-NEXT: pmovzxbw {{.*#+}} xmm14 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE-NEXT: pcmpeqw %xmm10, %xmm4
; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; SSE-NEXT: pslld $24, %xmm4
; SSE-NEXT: pmovzxbw {{.*#+}} xmm15 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: pmovzxwd {{.*#+}} xmm12 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE-NEXT: pslld $24, %xmm12
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE-NEXT: pslld $24, %xmm0
; SSE-NEXT: pcmpeqw %xmm10, %xmm3
; SSE-NEXT: pmovzxwd {{.*#+}} xmm12 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; SSE-NEXT: pcmpeqw %xmm1, %xmm3
; SSE-NEXT: pmovzxwd {{.*#+}} xmm11 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; SSE-NEXT: pslld $24, %xmm11
; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE-NEXT: pslld $24, %xmm3
; SSE-NEXT: pslld $24, %xmm12
; SSE-NEXT: pcmpeqw %xmm10, %xmm2
; SSE-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; SSE-NEXT: pcmpeqw %xmm1, %xmm2
; SSE-NEXT: pmovzxwd {{.*#+}} xmm9 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; SSE-NEXT: pslld $24, %xmm9
; SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE-NEXT: pslld $24, %xmm2
; SSE-NEXT: pslld $24, %xmm6
; SSE-NEXT: pcmpeqw %xmm10, %xmm14
; SSE-NEXT: pmovzxwd {{.*#+}} xmm13 = xmm14[0],zero,xmm14[1],zero,xmm14[2],zero,xmm14[3],zero
; SSE-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
; SSE-NEXT: pslld $24, %xmm14
; SSE-NEXT: pslld $24, %xmm13
; SSE-NEXT: movdqa %xmm9, %xmm7
; SSE-NEXT: blendvps %xmm0, %xmm8, %xmm7
; SSE-NEXT: movdqa %xmm9, %xmm1
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm8, %xmm1
; SSE-NEXT: movdqa %xmm9, %xmm5
; SSE-NEXT: pcmpeqw %xmm1, %xmm15
; SSE-NEXT: pmovzxwd {{.*#+}} xmm8 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero
; SSE-NEXT: pslld $24, %xmm8
; SSE-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
; SSE-NEXT: pslld $24, %xmm15
; SSE-NEXT: movdqu 16(%rdi,%rcx,4), %xmm6
; SSE-NEXT: movdqa %xmm6, %xmm4
; SSE-NEXT: pslld %xmm14, %xmm4
; SSE-NEXT: pslld %xmm13, %xmm6
; SSE-NEXT: blendvps %xmm0, %xmm4, %xmm6
; SSE-NEXT: movdqu (%rdi,%rcx,4), %xmm10
; SSE-NEXT: movdqa %xmm10, %xmm4
; SSE-NEXT: pslld %xmm14, %xmm4
; SSE-NEXT: pslld %xmm13, %xmm10
; SSE-NEXT: movdqa %xmm12, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm8, %xmm5
; SSE-NEXT: movdqa %xmm9, %xmm4
; SSE-NEXT: blendvps %xmm0, %xmm4, %xmm10
; SSE-NEXT: movdqu 48(%rdi,%rcx,4), %xmm12
; SSE-NEXT: movdqa %xmm12, %xmm5
; SSE-NEXT: pslld %xmm14, %xmm5
; SSE-NEXT: pslld %xmm13, %xmm12
; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm8, %xmm4
; SSE-NEXT: movdqa %xmm9, %xmm3
; SSE-NEXT: movdqa %xmm6, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm8, %xmm3
; SSE-NEXT: movdqa %xmm9, %xmm6
; SSE-NEXT: blendvps %xmm0, %xmm5, %xmm12
; SSE-NEXT: movdqu 32(%rdi,%rcx,4), %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: pslld %xmm14, %xmm5
; SSE-NEXT: pslld %xmm13, %xmm3
; SSE-NEXT: movdqa %xmm11, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm5, %xmm3
; SSE-NEXT: movdqu 80(%rdi,%rcx,4), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: pslld %xmm14, %xmm5
; SSE-NEXT: pslld %xmm13, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm8, %xmm6
; SSE-NEXT: movdqa %xmm9, %xmm12
; SSE-NEXT: movdqa %xmm13, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm8, %xmm12
; SSE-NEXT: movdqa %xmm9, %xmm2
; SSE-NEXT: movdqa %xmm14, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm8, %xmm2
; SSE-NEXT: movdqu 16(%rdi,%rcx,4), %xmm0
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd %xmm11, %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm13
; SSE-NEXT: pmulld %xmm0, %xmm13
; SSE-NEXT: movdqu (%rdi,%rcx,4), %xmm0
; SSE-NEXT: pslld $23, %xmm7
; SSE-NEXT: paddd %xmm11, %xmm7
; SSE-NEXT: cvttps2dq %xmm7, %xmm1
; SSE-NEXT: pmulld %xmm0, %xmm1
; SSE-NEXT: movdqu 48(%rdi,%rcx,4), %xmm0
; SSE-NEXT: pslld $23, %xmm4
; SSE-NEXT: paddd %xmm11, %xmm4
; SSE-NEXT: cvttps2dq %xmm4, %xmm7
; SSE-NEXT: pmulld %xmm0, %xmm7
; SSE-NEXT: movdqu 32(%rdi,%rcx,4), %xmm0
; SSE-NEXT: pslld $23, %xmm5
; SSE-NEXT: paddd %xmm11, %xmm5
; SSE-NEXT: cvttps2dq %xmm5, %xmm4
; SSE-NEXT: pmulld %xmm0, %xmm4
; SSE-NEXT: movdqu 80(%rdi,%rcx,4), %xmm0
; SSE-NEXT: pslld $23, %xmm6
; SSE-NEXT: paddd %xmm11, %xmm6
; SSE-NEXT: cvttps2dq %xmm6, %xmm5
; SSE-NEXT: pmulld %xmm0, %xmm5
; SSE-NEXT: movdqu 64(%rdi,%rcx,4), %xmm0
; SSE-NEXT: pslld $23, %xmm3
; SSE-NEXT: paddd %xmm11, %xmm3
; SSE-NEXT: cvttps2dq %xmm3, %xmm3
; SSE-NEXT: pmulld %xmm0, %xmm3
; SSE-NEXT: movdqu 112(%rdi,%rcx,4), %xmm0
; SSE-NEXT: pslld $23, %xmm2
; SSE-NEXT: paddd %xmm11, %xmm2
; SSE-NEXT: cvttps2dq %xmm2, %xmm2
; SSE-NEXT: pmulld %xmm0, %xmm2
; SSE-NEXT: movdqu 96(%rdi,%rcx,4), %xmm0
; SSE-NEXT: pslld $23, %xmm12
; SSE-NEXT: paddd %xmm11, %xmm12
; SSE-NEXT: cvttps2dq %xmm12, %xmm6
; SSE-NEXT: pmulld %xmm0, %xmm6
; SSE-NEXT: movdqu %xmm1, (%rdi,%rcx,4)
; SSE-NEXT: movdqu %xmm13, 16(%rdi,%rcx,4)
; SSE-NEXT: movdqu %xmm4, 32(%rdi,%rcx,4)
; SSE-NEXT: movdqu %xmm7, 48(%rdi,%rcx,4)
; SSE-NEXT: movdqu %xmm3, 64(%rdi,%rcx,4)
; SSE-NEXT: movdqu %xmm5, 80(%rdi,%rcx,4)
; SSE-NEXT: movdqu %xmm6, 96(%rdi,%rcx,4)
; SSE-NEXT: movdqu %xmm2, 112(%rdi,%rcx,4)
; SSE-NEXT: blendvps %xmm0, %xmm5, %xmm1
; SSE-NEXT: movdqu 64(%rdi,%rcx,4), %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: pslld %xmm14, %xmm5
; SSE-NEXT: pslld %xmm13, %xmm2
; SSE-NEXT: movdqa %xmm9, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm5, %xmm2
; SSE-NEXT: movdqu 112(%rdi,%rcx,4), %xmm5
; SSE-NEXT: movdqa %xmm5, %xmm7
; SSE-NEXT: pslld %xmm14, %xmm7
; SSE-NEXT: pslld %xmm13, %xmm5
; SSE-NEXT: movdqa %xmm15, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm7, %xmm5
; SSE-NEXT: movdqu 96(%rdi,%rcx,4), %xmm7
; SSE-NEXT: movdqa %xmm7, %xmm4
; SSE-NEXT: pslld %xmm14, %xmm4
; SSE-NEXT: pslld %xmm13, %xmm7
; SSE-NEXT: movdqa %xmm8, %xmm0
; SSE-NEXT: blendvps %xmm0, %xmm4, %xmm7
; SSE-NEXT: movups %xmm10, (%rdi,%rcx,4)
; SSE-NEXT: movups %xmm6, 16(%rdi,%rcx,4)
; SSE-NEXT: movups %xmm3, 32(%rdi,%rcx,4)
; SSE-NEXT: movups %xmm12, 48(%rdi,%rcx,4)
; SSE-NEXT: movups %xmm2, 64(%rdi,%rcx,4)
; SSE-NEXT: movups %xmm1, 80(%rdi,%rcx,4)
; SSE-NEXT: movups %xmm7, 96(%rdi,%rcx,4)
; SSE-NEXT: movups %xmm5, 112(%rdi,%rcx,4)
; SSE-NEXT: addq $32, %rcx
; SSE-NEXT: cmpq %rcx, %rdx
; SSE-NEXT: jne .LBB0_4
@@ -159,6 +142,7 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
;
; AVX1-LABEL: vector_variable_shift_left_loop:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: subq $24, %rsp
; AVX1-NEXT: testl %edx, %edx
; AVX1-NEXT: jle .LBB0_9
; AVX1-NEXT: # %bb.1: # %for.body.preheader
@@ -173,87 +157,90 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
; AVX1-NEXT: movl %eax, %edx
; AVX1-NEXT: andl $-32, %edx
; AVX1-NEXT: vmovd %r9d, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm11
; AVX1-NEXT: vmovd %r8d, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm12
; AVX1-NEXT: xorl %ecx, %ecx
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm11, %xmm14
; AVX1-NEXT: vextractf128 $1, %ymm12, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm13 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm14 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm15 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpxor %xmm11, %xmm11, %xmm11
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
; AVX1-NEXT: .p2align 4, 0x90
; AVX1-NEXT: .LBB0_4: # %vector.body
; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; AVX1-NEXT: # xmm1 = mem[0],zero,mem[1],zero
; AVX1-NEXT: vpmovzxdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; AVX1-NEXT: # xmm2 = mem[0],zero,mem[1],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpcmpeqw %xmm8, %xmm2, %xmm2
; AVX1-NEXT: vpmovsxwd %xmm2, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqw %xmm8, %xmm6, %xmm6
; AVX1-NEXT: vpmovsxwd %xmm6, %xmm3
; AVX1-NEXT: vpcmpeqw %xmm11, %xmm3, %xmm3
; AVX1-NEXT: vpmovsxwd %xmm3, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqw %xmm11, %xmm4, %xmm4
; AVX1-NEXT: vpmovsxwd %xmm4, %xmm8
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
; AVX1-NEXT: vpcmpeqw %xmm11, %xmm5, %xmm5
; AVX1-NEXT: vmovdqu (%rdi,%rcx,4), %xmm9
; AVX1-NEXT: vpslld %xmm2, %xmm9, %xmm10
; AVX1-NEXT: vpslld %xmm1, %xmm9, %xmm0
; AVX1-NEXT: vblendvps %xmm7, %xmm10, %xmm0, %xmm9
; AVX1-NEXT: vpmovsxwd %xmm5, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
; AVX1-NEXT: vpcmpeqw %xmm11, %xmm6, %xmm6
; AVX1-NEXT: vmovdqu 16(%rdi,%rcx,4), %xmm0
; AVX1-NEXT: vpslld %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpslld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxwd %xmm6, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm6, %xmm6
; AVX1-NEXT: vpcmpeqw %xmm8, %xmm7, %xmm7
; AVX1-NEXT: vpmovsxwd %xmm7, %xmm13
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm7, %xmm7
; AVX1-NEXT: vpcmpeqw %xmm8, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm9
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vblendvps %xmm2, %xmm14, %xmm4, %xmm2
; AVX1-NEXT: vpslld $23, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
; AVX1-NEXT: vpmulld 16(%rdi,%rcx,4), %xmm2, %xmm10
; AVX1-NEXT: vblendvps %xmm1, %xmm11, %xmm12, %xmm1
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpmulld (%rdi,%rcx,4), %xmm1, %xmm15
; AVX1-NEXT: vblendvps %xmm6, %xmm14, %xmm4, %xmm2
; AVX1-NEXT: vpslld $23, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
; AVX1-NEXT: vpmulld 48(%rdi,%rcx,4), %xmm2, %xmm2
; AVX1-NEXT: vblendvps %xmm3, %xmm11, %xmm12, %xmm3
; AVX1-NEXT: vpslld $23, %xmm3, %xmm3
; AVX1-NEXT: vpaddd %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vcvttps2dq %xmm3, %xmm3
; AVX1-NEXT: vpmulld 32(%rdi,%rcx,4), %xmm3, %xmm3
; AVX1-NEXT: vblendvps %xmm7, %xmm14, %xmm4, %xmm6
; AVX1-NEXT: vpslld $23, %xmm6, %xmm6
; AVX1-NEXT: vpaddd %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vcvttps2dq %xmm6, %xmm6
; AVX1-NEXT: vpmulld 80(%rdi,%rcx,4), %xmm6, %xmm6
; AVX1-NEXT: vblendvps %xmm13, %xmm11, %xmm12, %xmm7
; AVX1-NEXT: vpslld $23, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm5, %xmm7, %xmm7
; AVX1-NEXT: vcvttps2dq %xmm7, %xmm7
; AVX1-NEXT: vpmulld 64(%rdi,%rcx,4), %xmm7, %xmm7
; AVX1-NEXT: vblendvps %xmm0, %xmm14, %xmm4, %xmm0
; AVX1-NEXT: vpslld $23, %xmm0, %xmm0
; AVX1-NEXT: vpaddd %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX1-NEXT: vpmulld 112(%rdi,%rcx,4), %xmm0, %xmm0
; AVX1-NEXT: vblendvps %xmm9, %xmm11, %xmm12, %xmm1
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpmulld 96(%rdi,%rcx,4), %xmm1, %xmm1
; AVX1-NEXT: vmovdqu %xmm15, (%rdi,%rcx,4)
; AVX1-NEXT: vmovdqu %xmm10, 16(%rdi,%rcx,4)
; AVX1-NEXT: vmovdqu %xmm3, 32(%rdi,%rcx,4)
; AVX1-NEXT: vmovdqu %xmm2, 48(%rdi,%rcx,4)
; AVX1-NEXT: vmovdqu %xmm7, 64(%rdi,%rcx,4)
; AVX1-NEXT: vmovdqu %xmm6, 80(%rdi,%rcx,4)
; AVX1-NEXT: vmovdqu %xmm1, 96(%rdi,%rcx,4)
; AVX1-NEXT: vmovdqu %xmm0, 112(%rdi,%rcx,4)
; AVX1-NEXT: vblendvps %xmm3, %xmm2, %xmm0, %xmm10
; AVX1-NEXT: vmovdqu 32(%rdi,%rcx,4), %xmm2
; AVX1-NEXT: vpslld %xmm15, %xmm2, %xmm3
; AVX1-NEXT: vpslld %xmm14, %xmm2, %xmm2
; AVX1-NEXT: vblendvps %xmm8, %xmm3, %xmm2, %xmm8
; AVX1-NEXT: vmovdqu 48(%rdi,%rcx,4), %xmm3
; AVX1-NEXT: vpslld %xmm15, %xmm3, %xmm0
; AVX1-NEXT: vpslld %xmm14, %xmm3, %xmm3
; AVX1-NEXT: vblendvps %xmm4, %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vmovdqu 64(%rdi,%rcx,4), %xmm3
; AVX1-NEXT: vpslld %xmm13, %xmm3, %xmm4
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX1-NEXT: vpslld %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vblendvps %xmm7, %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vmovdqu 80(%rdi,%rcx,4), %xmm4
; AVX1-NEXT: vpslld %xmm13, %xmm4, %xmm7
; AVX1-NEXT: vpslld %xmm2, %xmm4, %xmm4
; AVX1-NEXT: vblendvps %xmm5, %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vmovdqu 96(%rdi,%rcx,4), %xmm5
; AVX1-NEXT: vpslld %xmm12, %xmm5, %xmm7
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX1-NEXT: vpslld %xmm2, %xmm5, %xmm5
; AVX1-NEXT: vblendvps %xmm1, %xmm7, %xmm5, %xmm1
; AVX1-NEXT: vmovdqu 112(%rdi,%rcx,4), %xmm5
; AVX1-NEXT: vpslld %xmm12, %xmm5, %xmm7
; AVX1-NEXT: vpslld %xmm2, %xmm5, %xmm5
; AVX1-NEXT: vblendvps %xmm6, %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vmovups %xmm9, (%rdi,%rcx,4)
; AVX1-NEXT: vmovups %xmm10, 16(%rdi,%rcx,4)
; AVX1-NEXT: vmovups %xmm8, 32(%rdi,%rcx,4)
; AVX1-NEXT: vmovups %xmm0, 48(%rdi,%rcx,4)
; AVX1-NEXT: vmovups %xmm3, 64(%rdi,%rcx,4)
; AVX1-NEXT: vmovups %xmm4, 80(%rdi,%rcx,4)
; AVX1-NEXT: vmovups %xmm1, 96(%rdi,%rcx,4)
; AVX1-NEXT: vmovups %xmm5, 112(%rdi,%rcx,4)
; AVX1-NEXT: addq $32, %rcx
; AVX1-NEXT: cmpq %rcx, %rdx
; AVX1-NEXT: jne .LBB0_4
@@ -261,6 +248,7 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
; AVX1-NEXT: cmpq %rax, %rdx
; AVX1-NEXT: jne .LBB0_6
; AVX1-NEXT: .LBB0_9: # %for.cond.cleanup
; AVX1-NEXT: addq $24, %rsp
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
; AVX1-NEXT: .p2align 4, 0x90
@@ -469,26 +457,29 @@ define void @vector_variable_shift_left_loop_simpler(i32* nocapture %arr, i8* no
; SSE-NEXT: movl %edx, %eax
; SSE-NEXT: andl $-4, %eax
; SSE-NEXT: movd %ecx, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
; SSE-NEXT: movd %r8d, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
; SSE-NEXT: movd %r9d, %xmm0
; SSE-NEXT: movd %r8d, %xmm2
; SSE-NEXT: movd %r9d, %xmm3
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
; SSE-NEXT: pslld $23, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
; SSE-NEXT: paddd %xmm4, %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pmulld %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
; SSE-NEXT: pslld $23, %xmm2
; SSE-NEXT: paddd %xmm4, %xmm2
; SSE-NEXT: cvttps2dq %xmm2, %xmm0
; SSE-NEXT: pmulld %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: .p2align 4, 0x90
; SSE-NEXT: .LBB1_2: # %vector.body
; SSE-NEXT: # =>This Inner Loop Header: Depth=1
; SSE-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE-NEXT: pcmpeqd %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm6
; SSE-NEXT: pslld $23, %xmm6
; SSE-NEXT: paddd %xmm5, %xmm6
; SSE-NEXT: cvttps2dq %xmm6, %xmm0
; SSE-NEXT: pmulld %xmm3, %xmm0
; SSE-NEXT: movdqu %xmm0, (%rdi,%rcx,4)
; SSE-NEXT: pcmpeqd %xmm3, %xmm0
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm4
; SSE-NEXT: movups %xmm4, (%rdi,%rcx,4)
; SSE-NEXT: addq $4, %rcx
; SSE-NEXT: cmpq %rcx, %rax
; SSE-NEXT: jne .LBB1_2
@@ -503,25 +494,28 @@ define void @vector_variable_shift_left_loop_simpler(i32* nocapture %arr, i8* no
; AVX1-NEXT: movl %edx, %eax
; AVX1-NEXT: andl $-4, %eax
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vmovd %r8d, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vmovd %r9d, %xmm2
; AVX1-NEXT: xorl %ecx, %ecx
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
; AVX1-NEXT: vpslld $23, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX1-NEXT: vpmulld %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpmulld %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: .p2align 4, 0x90
; AVX1-NEXT: .LBB1_2: # %vector.body
; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm5, %xmm5
; AVX1-NEXT: vblendvps %xmm5, %xmm0, %xmm1, %xmm5
; AVX1-NEXT: vpslld $23, %xmm5, %xmm5
; AVX1-NEXT: vpaddd %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vcvttps2dq %xmm5, %xmm5
; AVX1-NEXT: vpmulld %xmm5, %xmm2, %xmm5
; AVX1-NEXT: vmovdqu %xmm5, (%rdi,%rcx,4)
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vblendvps %xmm3, %xmm0, %xmm1, %xmm3
; AVX1-NEXT: vmovups %xmm3, (%rdi,%rcx,4)
; AVX1-NEXT: addq $4, %rcx
; AVX1-NEXT: cmpq %rcx, %rax
; AVX1-NEXT: jne .LBB1_2


@@ -1237,73 +1237,40 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
define <4 x i32> @vector_variable_shift_right(<4 x i1> %cond, <4 x i32> %x, <4 x i32> %y, <4 x i32> %z) nounwind {
; SSE2-LABEL: vector_variable_shift_right:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm4, %xmm4
; SSE2-NEXT: xorps %xmm5, %xmm5
; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm2[0],xmm5[1,2,3]
; SSE2-NEXT: movss {{.*#+}} xmm4 = xmm1[0],xmm4[1,2,3]
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: pandn %xmm2, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: psrld %xmm1, %xmm2
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,1,1,1,4,5,6,7]
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: psrld %xmm4, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrld %xmm2, %xmm4
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,1,4,5,6,7]
; SSE2-NEXT: psrld %xmm0, %xmm3
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: psrld %xmm5, %xmm3
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: pandn %xmm3, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vector_variable_shift_right:
; SSE41: # %bb.0:
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE41-NEXT: blendvps %xmm0, %xmm1, %xmm2
; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm3, %xmm1
; SSE41-NEXT: psrld %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm3, %xmm5
; SSE41-NEXT: psrld %xmm4, %xmm5
; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm1[0,1,2,3],xmm5[4,5,6,7]
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE41-NEXT: movdqa %xmm3, %xmm2
; SSE41-NEXT: psrld %xmm1, %xmm2
; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,1,4,5,6,7]
; SSE41-NEXT: psrld %xmm0, %xmm3
; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0,1,2,3],xmm3[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,3],xmm3[4,5],xmm5[6,7]
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psrld %xmm1, %xmm4
; SSE41-NEXT: psrld %xmm2, %xmm3
; SSE41-NEXT: blendvps %xmm0, %xmm4, %xmm3
; SSE41-NEXT: movaps %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vector_variable_shift_right:
; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsrld %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX1-NEXT: vpsrld %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; AVX1-NEXT: vpsrld %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpsrld %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vector_variable_shift_right:
@@ -1381,30 +1348,19 @@ define <4 x i32> @vector_variable_shift_right(<4 x i1> %cond, <4 x i32> %x, <4 x
; X32-SSE-NEXT: movl %esp, %ebp
; X32-SSE-NEXT: andl $-16, %esp
; X32-SSE-NEXT: subl $16, %esp
; X32-SSE-NEXT: xorps %xmm3, %xmm3
; X32-SSE-NEXT: xorps %xmm4, %xmm4
; X32-SSE-NEXT: movss {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3]
; X32-SSE-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; X32-SSE-NEXT: pslld $31, %xmm0
; X32-SSE-NEXT: psrad $31, %xmm0
; X32-SSE-NEXT: movdqa 8(%ebp), %xmm3
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; X32-SSE-NEXT: pand %xmm0, %xmm1
; X32-SSE-NEXT: pandn %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[2,3,3,3,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm3, %xmm2
; X32-SSE-NEXT: psrld %xmm1, %xmm2
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,1,1,1,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm3, %xmm1
; X32-SSE-NEXT: movdqa 8(%ebp), %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: psrld %xmm3, %xmm2
; X32-SSE-NEXT: psrld %xmm4, %xmm1
; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[2,3,3,3,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm3, %xmm4
; X32-SSE-NEXT: psrld %xmm2, %xmm4
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,1,4,5,6,7]
; X32-SSE-NEXT: psrld %xmm0, %xmm3
; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; X32-SSE-NEXT: movaps %xmm1, %xmm0
; X32-SSE-NEXT: pand %xmm0, %xmm2
; X32-SSE-NEXT: pandn %xmm1, %xmm0
; X32-SSE-NEXT: por %xmm2, %xmm0
; X32-SSE-NEXT: movl %ebp, %esp
; X32-SSE-NEXT: popl %ebp
; X32-SSE-NEXT: retl


@@ -4,12 +4,21 @@
; RUN: opt -codegenprepare -mtriple=x86_64-- -mattr=avx -S -enable-debugify < %s 2>&1 | FileCheck %s -check-prefix=DEBUG
define <4 x i32> @vector_variable_shift_right(<4 x i1> %cond, <4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; ALL-LABEL: @vector_variable_shift_right(
; ALL-NEXT: [[SPLAT1:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
; ALL-NEXT: [[SPLAT2:%.*]] = shufflevector <4 x i32> [[Y:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
; ALL-NEXT: [[SEL:%.*]] = select <4 x i1> [[COND:%.*]], <4 x i32> [[SPLAT1]], <4 x i32> [[SPLAT2]]
; ALL-NEXT: [[SH:%.*]] = lshr <4 x i32> [[Z:%.*]], [[SEL]]
; ALL-NEXT: ret <4 x i32> [[SH]]
; AVX-LABEL: @vector_variable_shift_right(
; AVX-NEXT: [[SPLAT1:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
; AVX-NEXT: [[SPLAT2:%.*]] = shufflevector <4 x i32> [[Y:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
; AVX-NEXT: [[SEL:%.*]] = select <4 x i1> [[COND:%.*]], <4 x i32> [[SPLAT1]], <4 x i32> [[SPLAT2]]
; AVX-NEXT: [[TMP1:%.*]] = lshr <4 x i32> [[Z:%.*]], [[SPLAT1]]
; AVX-NEXT: [[TMP2:%.*]] = lshr <4 x i32> [[Z]], [[SPLAT2]]
; AVX-NEXT: [[TMP3:%.*]] = select <4 x i1> [[COND]], <4 x i32> [[TMP1]], <4 x i32> [[TMP2]]
; AVX-NEXT: ret <4 x i32> [[TMP3]]
;
; AVX2-LABEL: @vector_variable_shift_right(
; AVX2-NEXT: [[SPLAT1:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
; AVX2-NEXT: [[SPLAT2:%.*]] = shufflevector <4 x i32> [[Y:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
; AVX2-NEXT: [[SEL:%.*]] = select <4 x i1> [[COND:%.*]], <4 x i32> [[SPLAT1]], <4 x i32> [[SPLAT2]]
; AVX2-NEXT: [[SH:%.*]] = lshr <4 x i32> [[Z:%.*]], [[SEL]]
; AVX2-NEXT: ret <4 x i32> [[SH]]
;
%splat1 = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
%splat2 = shufflevector <4 x i32> %y, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -36,19 +45,23 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
; AVX-NEXT: br label [[VECTOR_BODY:%.*]]
; AVX: vector.body:
; AVX-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; AVX-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[SPLATINSERT22]], <4 x i32> undef, <4 x i32> zeroinitializer
; AVX-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[CONTROL:%.*]], i64 [[INDEX]]
; AVX-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <4 x i8>*
; AVX-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP2]], align 1
; AVX-NEXT: [[TMP3:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], zeroinitializer
; AVX-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP3]], <4 x i32> [[SPLAT1]], <4 x i32> [[SPLAT2]]
; AVX-NEXT: [[TMP5:%.*]] = shl <4 x i32> [[TMP0]], [[TMP4]]
; AVX-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[ARR:%.*]], i64 [[INDEX]]
; AVX-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <4 x i32>*
; AVX-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP7]], align 4
; AVX-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[SPLATINSERT20]], <4 x i32> undef, <4 x i32> zeroinitializer
; AVX-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[SPLATINSERT18]], <4 x i32> undef, <4 x i32> zeroinitializer
; AVX-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[SPLATINSERT22]], <4 x i32> undef, <4 x i32> zeroinitializer
; AVX-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[CONTROL:%.*]], i64 [[INDEX]]
; AVX-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to <4 x i8>*
; AVX-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP4]], align 1
; AVX-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], zeroinitializer
; AVX-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[SPLAT1]], <4 x i32> [[SPLAT2]]
; AVX-NEXT: [[TMP7:%.*]] = shl <4 x i32> [[TMP2]], [[TMP1]]
; AVX-NEXT: [[TMP8:%.*]] = shl <4 x i32> [[TMP2]], [[TMP0]]
; AVX-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[TMP7]], <4 x i32> [[TMP8]]
; AVX-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[ARR:%.*]], i64 [[INDEX]]
; AVX-NEXT: [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
; AVX-NEXT: store <4 x i32> [[TMP9]], <4 x i32>* [[TMP11]], align 4
; AVX-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; AVX-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; AVX-NEXT: br i1 [[TMP8]], label [[EXIT]], label [[VECTOR_BODY]]
; AVX-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; AVX-NEXT: br i1 [[TMP12]], label [[EXIT]], label [[VECTOR_BODY]]
; AVX: exit:
; AVX-NEXT: ret void
;