SLP Vectorizer: Implement a simple CSE optimization for the gather sequences.

llvm-svn: 184660
This commit is contained in:
Nadav Rotem 2013-06-23 06:15:46 +00:00
parent 04d6ae65b7
commit eb65e67eea
2 changed files with 130 additions and 24 deletions

View File

@ -20,6 +20,7 @@
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
@ -126,9 +127,9 @@ public:
static const int MAX_COST = INT_MIN;
FuncSLP(Function *Func, ScalarEvolution *Se, DataLayout *Dl,
TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li)
: F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li),
Builder(Se->getContext()) {
TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li) :
F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li),
Builder(Se->getContext()) {
for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
BasicBlock *BB = it;
BlocksNumbers[BB] = BlockNumbering(BB);
@ -209,9 +210,8 @@ public:
/// \returns a vector from a collection of scalars in \p VL.
Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);
/// \brief Try to hoist gather sequences outside of the loop in cases where
/// all of the sources are loop invariant.
void hoistGatherSequence();
/// \brief Perform LICM and CSE on the newly generated gather sequences.
void optimizeGatherSequence();
bool needToGatherAny(ArrayRef<Value *> VL) {
for (int i = 0, e = VL.size(); i < e; ++i)
@ -574,6 +574,7 @@ Instruction *FuncSLP::getInstructionForIndex(unsigned Index, BasicBlock *BB) {
int FuncSLP::getFirstUserIndex(ArrayRef<Value *> VL) {
BasicBlock *BB = getSameBlock(VL);
assert(BB && "All instructions must come from the same block");
BlockNumbering &BN = BlocksNumbers[BB];
// Find the first user of the values.
@ -932,7 +933,6 @@ Value *FuncSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
GatherSeq.insert(I);
}
VectorizedValues[VL[0]] = Vec;
return Vec;
}
@ -1116,11 +1116,12 @@ Value *FuncSLP::vectorizeTree(ArrayRef<Value *> VL) {
// We moved some instructions around. We have to number them again
// before we can do any analysis.
for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it)
BlocksNumbers[it].forget();
// Clear the state.
MustGather.clear();
VectorizedValues.clear();
MemBarrierIgnoreList.clear();
for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it)
BlocksNumbers[it].forget();
return V;
}
@ -1137,24 +1138,19 @@ Value *FuncSLP::vectorizeArith(ArrayRef<Value *> Operands) {
return Vec;
}
void FuncSLP::hoistGatherSequence() {
void FuncSLP::optimizeGatherSequence() {
// LICM InsertElementInst sequences.
for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
e = GatherSeq.end();
it != e; ++it) {
InsertElementInst *Insert = dyn_cast_or_null<InsertElementInst>(*it);
e = GatherSeq.end(); it != e; ++it) {
InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);
// The InsertElement sequence can be simplified into a constant.
// Also Ignore NULL pointers because they are only here to separate
// sequences.
if (!Insert)
continue;
BasicBlock *BB = Insert->getParent();
// Check if this block is inside a loop.
Loop *L = LI->getLoopFor(BB);
Loop *L = LI->getLoopFor(Insert->getParent());
if (!L)
return;
continue;
// Check if it has a preheader.
BasicBlock *PreHeader = L->getLoopPreheader();
@ -1171,10 +1167,35 @@ void FuncSLP::hoistGatherSequence() {
if (NewElem && L->contains(NewElem))
continue;
// Mark the insertion point for the block.
Instruction *Location = PreHeader->getTerminator();
// We can hoist this instruction. Move it to the pre-header.
Insert->moveBefore(Location);
Insert->moveBefore(PreHeader->getTerminator());
}
// Perform O(N^2) search over the gather sequences and merge identical
// instructions. TODO: We can further optimize this scan if we split the
// instructions into different buckets based on the insert lane.
SmallPtrSet<Instruction*, 16> Visited;
ReversePostOrderTraversal<Function*> RPOT(F);
for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
E = RPOT.end(); I != E; ++I) {
BasicBlock *BB = *I;
// For all instructions in the function:
for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);
if (!Insert || !GatherSeq.count(Insert))
continue;
// Check if we can replace this instruction with any of the
// visited instructions.
for (SmallPtrSet<Instruction*, 16>::iterator v = Visited.begin(),
ve = Visited.end(); v != ve; ++v) {
if (Insert->isIdenticalTo(*v)) {
Insert->replaceAllUsesWith(*v);
break;
}
}
Visited.insert(Insert);
}
}
}
@ -1232,7 +1253,7 @@ struct SLPVectorizer : public FunctionPass {
}
if (Changed) {
R.hoistGatherSequence();
R.optimizeGatherSequence();
DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
DEBUG(verifyFunction(F));
}

View File

@ -0,0 +1,85 @@
; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
target triple = "i386-apple-macosx10.8.0"
;int test(double *G) {
; G[0] = 1+G[5]*4;
; G[1] = 6+G[6]*3;
; G[2] = 7+G[5]*4;
; G[3] = 8+G[6]*4;
;}
;CHECK: @test
;CHECK: insertelement <2 x double> undef
;CHECK-NEXT: insertelement <2 x double>
;CHECK-NEXT: fadd <2 x double>
;CHECK: store <2 x double>
;CHECK: insertelement <2 x double>
;CHECK-NEXT: fadd <2 x double>
;CHECK: store <2 x double>
;CHECK: ret i32
; Two vectorizable pairs of stores: {G[0],G[1]} and {G[2],G[3]}. Both pairs
; consume the scalar loads G[5] and G[6] (%0 and %1), so the SLP vectorizer
; emits a gather (insertelement) sequence for each tree; the CHECK lines above
; pin the expected <2 x double> code shape for each pair.
define i32 @test(double* nocapture %G) {
entry:
; Load the two scalars (G[5], G[6]) shared by both store pairs.
%arrayidx = getelementptr inbounds double* %G, i64 5
%0 = load double* %arrayidx, align 8
%mul = fmul double %0, 4.000000e+00
%add = fadd double %mul, 1.000000e+00
; First pair: G[0] = 1 + G[5]*4, G[1] = 6 + G[6]*3.
store double %add, double* %G, align 8
%arrayidx2 = getelementptr inbounds double* %G, i64 6
%1 = load double* %arrayidx2, align 8
%mul3 = fmul double %1, 3.000000e+00
%add4 = fadd double %mul3, 6.000000e+00
%arrayidx5 = getelementptr inbounds double* %G, i64 1
store double %add4, double* %arrayidx5, align 8
; Second pair: G[2] = 7 + G[5]*4, G[3] = 8 + G[6]*4. Note %mul (G[5]*4) is
; reused from the first pair, while %mul11 recomputes G[6]*4.
%add8 = fadd double %mul, 7.000000e+00
%arrayidx9 = getelementptr inbounds double* %G, i64 2
store double %add8, double* %arrayidx9, align 8
%mul11 = fmul double %1, 4.000000e+00
%add12 = fadd double %mul11, 8.000000e+00
%arrayidx13 = getelementptr inbounds double* %G, i64 3
store double %add12, double* %arrayidx13, align 8
; Return value is unused by the test; only the vectorized stores matter.
ret i32 undef
}
;int foo(double *A, int n) {
; A[0] = A[0] * 7.9 * n + 6.0;
; A[1] = A[1] * 7.7 * n + 2.0;
; A[2] = A[2] * 7.6 * n + 3.0;
; A[3] = A[3] * 7.4 * n + 4.0;
;}
;CHECK: @foo
;CHECK: insertelement <2 x double>
;CHECK: insertelement <2 x double>
;CHECK-NOT: insertelement <2 x double>
;CHECK: ret
; All four lanes multiply by the same loop-invariant value %conv (sitofp of
; %n), so every vectorized pair needs an identical <2 x double> gather of
; {%conv, %conv}. The CHECK-NOT above verifies the new CSE in
; optimizeGatherSequence merges those identical insertelement sequences:
; only one gather of %conv survives.
define i32 @foo(double* nocapture %A, i32 %n) {
entry:
; A[0] = A[0] * 7.9 * n + 6.0
%0 = load double* %A, align 8
%mul = fmul double %0, 7.900000e+00
; %conv is the shared scalar that every gather sequence broadcasts.
%conv = sitofp i32 %n to double
%mul1 = fmul double %conv, %mul
%add = fadd double %mul1, 6.000000e+00
store double %add, double* %A, align 8
; A[1] = A[1] * 7.7 * n + 2.0
%arrayidx3 = getelementptr inbounds double* %A, i64 1
%1 = load double* %arrayidx3, align 8
%mul4 = fmul double %1, 7.700000e+00
%mul6 = fmul double %conv, %mul4
%add7 = fadd double %mul6, 2.000000e+00
store double %add7, double* %arrayidx3, align 8
; A[2] = A[2] * 7.6 * n + 3.0
%arrayidx9 = getelementptr inbounds double* %A, i64 2
%2 = load double* %arrayidx9, align 8
%mul10 = fmul double %2, 7.600000e+00
%mul12 = fmul double %conv, %mul10
%add13 = fadd double %mul12, 3.000000e+00
store double %add13, double* %arrayidx9, align 8
; A[3] = A[3] * 7.4 * n + 4.0
%arrayidx15 = getelementptr inbounds double* %A, i64 3
%3 = load double* %arrayidx15, align 8
%mul16 = fmul double %3, 7.400000e+00
%mul18 = fmul double %conv, %mul16
%add19 = fadd double %mul18, 4.000000e+00
store double %add19, double* %arrayidx15, align 8
; Return value is unused by the test.
ret i32 undef
}