[LoopUnroll] Implement profile-based loop peeling

This implements PGO-driven loop peeling.

The basic idea is that when the average dynamic trip-count of a loop is known,
based on PGO, to be low, we can expect a performance win by peeling off the
first several iterations of that loop.
Unlike unrolling based on a known trip count, or a trip count multiple, this
doesn't save us the conditional check and branch on each iteration. However,
it does allow us to simplify the straight-line code we get (constant-folding,
etc.). This is important because we expect to usually execute only this
code, and not the actual loop.
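
As a sketch (hypothetical source, not part of this patch), if the profile
says a loop almost always runs a single iteration, peeling the first
iteration turns

    for (int i = 0; i < n; ++i)  // profile: n is usually 1
      f(i);

into

    if (n > 0) {
      f(0);                        // peeled copy; i constant-folds to 0
      for (int i = 1; i < n; ++i)  // rarely-entered residual loop
        f(i);
    }

so that later passes can simplify the hot, straight-line copy.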

This is currently disabled by default.

Differential Revision: https://reviews.llvm.org/D25963

llvm-svn: 288274
Author: Michael Kuperstein, 2016-11-30 21:13:57 +00:00
commit b151a641aa (parent aa8b28e509)
9 changed files with 635 additions and 33 deletions

include/llvm/Analysis/TargetTransformInfo.h

@@ -265,6 +265,11 @@ public:
/// transformation will select an unrolling factor based on the current cost
/// threshold and other factors.
unsigned Count;
/// A forced peeling factor (the number of bodies of the original loop
/// that should be peeled off before the loop body). When set to 0, the
/// unrolling transformation will select a peeling factor based on profile
/// information and other factors.
unsigned PeelCount;
/// Default unroll count for loops with run-time trip count.
unsigned DefaultUnrollRuntimeCount;
// Set the maximum unrolling factor. The unrolling factor may be selected
@@ -298,6 +303,8 @@ public:
bool Force;
/// Allow using trip count upper bound to unroll loops.
bool UpperBound;
/// Allow peeling off loop iterations for loops with a low dynamic trip count.
bool AllowPeeling;
};
/// \brief Get target-customized preferences for the generic loop unrolling

include/llvm/Transforms/Utils/UnrollLoop.h

@@ -16,6 +16,9 @@
#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
// Needed because we can't forward-declare the nested struct
// TargetTransformInfo::UnrollingPreferences
#include "llvm/Analysis/TargetTransformInfo.h"
namespace llvm {
@@ -33,8 +36,8 @@ class ScalarEvolution;
bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
bool AllowRuntime, bool AllowExpensiveTripCount,
bool PreserveCondBr, bool PreserveOnlyFirst,
unsigned TripMultiple, LoopInfo *LI, ScalarEvolution *SE,
DominatorTree *DT, AssumptionCache *AC,
unsigned TripMultiple, unsigned PeelCount, LoopInfo *LI,
ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
OptimizationRemarkEmitter *ORE, bool PreserveLCSSA);
bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
@@ -43,6 +46,12 @@ bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
ScalarEvolution *SE, DominatorTree *DT,
bool PreserveLCSSA);
void computePeelCount(Loop *L, unsigned LoopSize,
TargetTransformInfo::UnrollingPreferences &UP);
bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE,
DominatorTree *DT, bool PreserveLCSSA);
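// Typical flow, as wired up in this patch: LoopUnrollPass calls
// computePeelCount() to fill in UnrollingPreferences::PeelCount, passes the
// result to UnrollLoop(), and UnrollLoop() calls peelLoop() before doing any
// actual unrolling.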
MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
}

lib/Transforms/Scalar/LoopUnrollPass.cpp

@@ -24,7 +24,6 @@
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstVisitor.h"
@@ -108,6 +107,11 @@ static cl::opt<unsigned> FlatLoopTripCountThreshold(
"threshold, the loop is considered as flat and will be less "
"aggressively unrolled."));
static cl::opt<bool>
UnrollAllowPeeling("unroll-allow-peeling", cl::Hidden,
cl::desc("Allows loops to be peeled when the dynamic "
"trip count is known to be low."));
/// A magic value for use with the Threshold parameter to indicate
/// that the loop unroll should be performed regardless of how much
/// code expansion would result.
@@ -129,6 +133,7 @@ static TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(
UP.PartialThreshold = UP.Threshold;
UP.PartialOptSizeThreshold = 0;
UP.Count = 0;
UP.PeelCount = 0;
UP.DefaultUnrollRuntimeCount = 8;
UP.MaxCount = UINT_MAX;
UP.FullUnrollMaxCount = UINT_MAX;
@@ -139,6 +144,7 @@ static TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(
UP.AllowExpensiveTripCount = false;
UP.Force = false;
UP.UpperBound = false;
UP.AllowPeeling = false;
// Override with any target specific settings
TTI.getUnrollingPreferences(L, UP);
@@ -171,6 +177,8 @@ static TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(
UP.Runtime = UnrollRuntime;
if (UnrollMaxUpperBound == 0)
UP.UpperBound = false;
if (UnrollAllowPeeling.getNumOccurrences() > 0)
UP.AllowPeeling = UnrollAllowPeeling;
// Apply user values provided by argument
if (UserThreshold.hasValue()) {
@@ -754,16 +762,6 @@ static bool computeUnrollCount(
bool ExplicitUnroll = PragmaCount > 0 || PragmaFullUnroll ||
PragmaEnableUnroll || UserUnrollCount;
// Check if the runtime trip count is too small when profile is available.
if (L->getHeader()->getParent()->getEntryCount() && TripCount == 0) {
if (auto ProfileTripCount = getLoopEstimatedTripCount(L)) {
if (*ProfileTripCount < FlatLoopTripCountThreshold)
return false;
else
UP.AllowExpensiveTripCount = true;
}
}
if (ExplicitUnroll && TripCount != 0) {
// If the loop has an unrolling pragma, we want to be more aggressive with
// unrolling limits. Set thresholds to at least the PragmaThreshold value
@@ -878,12 +876,31 @@ static bool computeUnrollCount(
<< "Unable to fully unroll loop as directed by unroll(full) pragma "
"because loop has a runtime trip count.");
// 5th priority is runtime unrolling.
// 5th priority is loop peeling.
computePeelCount(L, LoopSize, UP);
if (UP.PeelCount) {
UP.Runtime = false;
UP.Count = 1;
return ExplicitUnroll;
}
// 6th priority is runtime unrolling.
// Don't unroll a runtime trip count loop when it is disabled.
if (HasRuntimeUnrollDisablePragma(L)) {
UP.Count = 0;
return false;
}
// Check if the runtime trip count is too small when profile is available.
if (L->getHeader()->getParent()->getEntryCount()) {
if (auto ProfileTripCount = getLoopEstimatedTripCount(L)) {
if (*ProfileTripCount < FlatLoopTripCountThreshold)
return false;
else
UP.AllowExpensiveTripCount = true;
}
}
// Reduce count based on the type of unrolling and the threshold values.
UP.Runtime |= PragmaEnableUnroll || PragmaCount > 0 || UserUnrollCount;
if (!UP.Runtime) {
@@ -1042,13 +1059,17 @@ static bool tryToUnrollLoop(Loop *L, DominatorTree &DT, LoopInfo *LI,
// Unroll the loop.
if (!UnrollLoop(L, UP.Count, TripCount, UP.Force, UP.Runtime,
UP.AllowExpensiveTripCount, UseUpperBound, MaxOrZero,
TripMultiple, LI, SE, &DT, &AC, &ORE, PreserveLCSSA))
TripMultiple, UP.PeelCount, LI, SE, &DT, &AC, &ORE,
PreserveLCSSA))
return false;
// If loop has an unroll count pragma or unrolled by explicitly set count
// mark loop as unrolled to prevent unrolling beyond that requested.
if (IsCountSetExplicitly)
// If the loop was peeled, we already "used up" the profile information
// we had, so we don't want to unroll or peel again.
if (IsCountSetExplicitly || UP.PeelCount)
SetLoopAlreadyUnrolled(L);
return true;
}

lib/Transforms/Utils/CMakeLists.txt

@@ -26,6 +26,7 @@ add_llvm_library(LLVMTransformUtils
Local.cpp
LoopSimplify.cpp
LoopUnroll.cpp
LoopUnrollPeel.cpp
LoopUnrollRuntime.cpp
LoopUtils.cpp
LoopVersioning.cpp

lib/Transforms/Utils/LoopUnroll.cpp

@@ -202,6 +202,9 @@ static bool needToInsertPhisForLCSSA(Loop *L, std::vector<BasicBlock *> Blocks,
/// runtime-unroll the loop if computing RuntimeTripCount will be expensive and
/// AllowExpensiveTripCount is false.
///
/// If we want to perform PGO-based loop peeling, PeelCount is set to the
/// number of iterations we want to peel off.
///
/// The LoopInfo Analysis that is passed will be kept consistent.
///
/// This utility preserves LoopInfo. It will also preserve ScalarEvolution and
@@ -209,9 +212,11 @@ static bool needToInsertPhisForLCSSA(Loop *L, std::vector<BasicBlock *> Blocks,
bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
bool AllowRuntime, bool AllowExpensiveTripCount,
bool PreserveCondBr, bool PreserveOnlyFirst,
unsigned TripMultiple, LoopInfo *LI, ScalarEvolution *SE,
DominatorTree *DT, AssumptionCache *AC,
OptimizationRemarkEmitter *ORE, bool PreserveLCSSA) {
unsigned TripMultiple, unsigned PeelCount, LoopInfo *LI,
ScalarEvolution *SE, DominatorTree *DT,
AssumptionCache *AC, OptimizationRemarkEmitter *ORE,
bool PreserveLCSSA) {
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
@@ -257,9 +262,8 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
if (TripCount != 0 && Count > TripCount)
Count = TripCount;
// Don't enter the unroll code if there is nothing to do. This way we don't
// need to support "partial unrolling by 1".
if (TripCount == 0 && Count < 2)
// Don't enter the unroll code if there is nothing to do.
if (TripCount == 0 && Count < 2 && PeelCount == 0)
return false;
assert(Count > 0);
@@ -288,6 +292,13 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
// flag is specified.
bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);
assert((!RuntimeTripCount || !PeelCount) &&
"Did not expect runtime trip-count unrolling "
"and peeling for the same loop");
if (PeelCount)
peelLoop(L, PeelCount, LI, SE, DT, PreserveLCSSA);
// Loops containing convergent instructions must have a count that divides
// their TripMultiple.
DEBUG(
@@ -301,9 +312,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
"Unroll count must divide trip multiple if loop contains a "
"convergent operation.");
});
// Don't output the runtime loop remainder if Count is a multiple of
// TripMultiple. Such a remainder is never needed, and is unsafe if the loop
// contains a convergent instruction.
if (RuntimeTripCount && TripMultiple % Count != 0 &&
!UnrollRuntimeLoopRemainder(L, Count, AllowExpensiveTripCount,
UnrollRuntimeEpilog, LI, SE, DT,
@@ -339,6 +348,13 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
L->getHeader())
<< "completely unrolled loop with "
<< NV("UnrollCount", TripCount) << " iterations");
} else if (PeelCount) {
DEBUG(dbgs() << "PEELING loop %" << Header->getName()
<< " with iteration count " << PeelCount << "!\n");
ORE->emit(OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(),
L->getHeader())
<< " peeled loop by " << NV("PeelCount", PeelCount)
<< " iterations");
} else {
OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),
L->getHeader());
@@ -628,7 +644,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
DEBUG(DT->verifyDomTree());
// Simplify any new induction variables in the partially unrolled loop.
if (SE && !CompletelyUnroll) {
if (SE && !CompletelyUnroll && Count > 1) {
SmallVector<WeakVH, 16> DeadInsts;
simplifyLoopIVs(L, SE, DT, LI, DeadInsts);

lib/Transforms/Utils/LoopUnrollPeel.cpp

@@ -0,0 +1,405 @@
//===-- LoopUnrollPeel.cpp - Loop peeling utilities -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements some loop unrolling utilities for peeling loops
// with dynamically inferred (from PGO) trip counts. See LoopUnroll.cpp for
// unrolling loops with compile-time constant trip counts.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "loop-unroll"
STATISTIC(NumPeeled, "Number of loops peeled");
static cl::opt<unsigned> UnrollPeelMaxCount(
"unroll-peel-max-count", cl::init(7), cl::Hidden,
cl::desc("Max average trip count which will cause loop peeling."));
static cl::opt<unsigned> UnrollForcePeelCount(
"unroll-force-peel-count", cl::init(0), cl::Hidden,
cl::desc("Force a peel count regardless of profiling information."));
// Check whether we are capable of peeling this loop.
static bool canPeel(Loop *L) {
// Make sure the loop is in simplified form
if (!L->isLoopSimplifyForm())
return false;
// Only peel loops that contain a single exit
if (!L->getExitingBlock() || !L->getUniqueExitBlock())
return false;
return true;
}
// Set UP.PeelCount to the number of iterations we want to peel off.
void llvm::computePeelCount(Loop *L, unsigned LoopSize,
TargetTransformInfo::UnrollingPreferences &UP) {
UP.PeelCount = 0;
if (!canPeel(L))
return;
// Only try to peel innermost loops.
if (!L->empty())
return;
// If the user provided a peel count, use that.
bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0;
if (UserPeelCount) {
DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
<< " iterations.\n");
UP.PeelCount = UnrollForcePeelCount;
return;
}
// If we don't know the trip count, but have reason to believe the average
// trip count is low, peeling should be beneficial, since we will usually
// hit the peeled section.
// We only do this in the presence of profile information, since otherwise
// our estimates of the trip count are not reliable enough.
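// Worked example, with hypothetical numbers: for LoopSize = 20, an
// estimated trip count of 3, and UP.Threshold = 150, we peel 3 iterations,
// since 3 <= UnrollPeelMaxCount (default 7) and the peel cost
// 20 * (3 + 1) = 80 does not exceed the threshold of 150.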
if (UP.AllowPeeling && L->getHeader()->getParent()->getEntryCount()) {
Optional<unsigned> PeelCount = getLoopEstimatedTripCount(L);
if (!PeelCount)
return;
DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount
<< "\n");
if (*PeelCount) {
if ((*PeelCount <= UnrollPeelMaxCount) &&
(LoopSize * (*PeelCount + 1) <= UP.Threshold)) {
DEBUG(dbgs() << "Peeling first " << *PeelCount << " iterations.\n");
UP.PeelCount = *PeelCount;
return;
}
DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n");
DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1) << "\n");
DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n");
}
}
return;
}
/// \brief Update the branch weights of the latch of a peeled-off loop
/// iteration.
/// Our goal is to make sure that:
/// a) The total weight of all the copies of the loop body is preserved.
/// b) The total weight of the loop exit is preserved.
/// c) The body weight is reasonably distributed between the peeled iterations.
///
/// \param Header The copy of the header block that belongs to the next iteration.
/// \param LatchBR The copy of the latch branch that belongs to this iteration.
/// \param IterNumber The serial number of the iteration that was just
/// peeled off.
/// \param AvgIters The average number of iterations we expect the loop to have.
/// \param[in,out] PeeledHeaderWeight The total number of dynamic loop
/// iterations that are unaccounted for. As an input, it represents the number
/// of times we expect to enter the header of the iteration currently being
/// peeled off. The output is the number of times we expect to enter the
/// header of the next iteration.
static void updateBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
unsigned IterNumber, unsigned AvgIters,
uint64_t &PeeledHeaderWeight) {
// FIXME: Pick a more realistic distribution.
// Currently the proportion of weight we assign to the fall-through
// side of the branch drops linearly with the iteration number, and we use
// a 0.9 fudge factor to make the drop-off less sharp...
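// Worked example, with hypothetical weights: for AvgIters = 3 and an
// incoming PeeledHeaderWeight of 1000, iteration 0 gets FallThruWeight =
// 1000 * ((3 - 0) / 3) * 0.9 = 900 and ExitWeight = 1000 - 900 = 100,
// leaving PeeledHeaderWeight = 900 for the next peeled iteration.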
if (PeeledHeaderWeight) {
uint64_t FallThruWeight =
PeeledHeaderWeight * ((float)(AvgIters - IterNumber) / AvgIters * 0.9);
uint64_t ExitWeight = PeeledHeaderWeight - FallThruWeight;
PeeledHeaderWeight -= ExitWeight;
unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
MDBuilder MDB(LatchBR->getContext());
MDNode *WeightNode =
HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThruWeight)
: MDB.createBranchWeights(FallThruWeight, ExitWeight);
LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
}
}
/// \brief Clones the body of the loop L, putting it between \p InsertTop and \p
/// InsertBot.
/// \param IterNumber The serial number of the iteration currently being
/// peeled off.
/// \param Exit The exit block of the original loop.
/// \param[out] NewBlocks A list of the blocks in the newly created clone.
/// \param[out] VMap The value map between the loop and the new clone.
/// \param LoopBlocks A helper for DFS-traversal of the loop.
/// \param LVMap A value-map that maps instructions from the original loop to
/// instructions in the last peeled-off iteration.
static void cloneLoopBlocks(Loop *L, unsigned IterNumber, BasicBlock *InsertTop,
BasicBlock *InsertBot, BasicBlock *Exit,
SmallVectorImpl<BasicBlock *> &NewBlocks,
LoopBlocksDFS &LoopBlocks, ValueToValueMapTy &VMap,
ValueToValueMapTy &LVMap, LoopInfo *LI) {
BasicBlock *Header = L->getHeader();
BasicBlock *Latch = L->getLoopLatch();
BasicBlock *PreHeader = L->getLoopPreheader();
Function *F = Header->getParent();
LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
Loop *ParentLoop = L->getParentLoop();
// For each block in the original loop, create a new copy,
// and update the value map with the newly created values.
for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F);
NewBlocks.push_back(NewBB);
if (ParentLoop)
ParentLoop->addBasicBlockToLoop(NewBB, *LI);
VMap[*BB] = NewBB;
}
// Hook-up the control flow for the newly inserted blocks.
// The new header is hooked up directly to the "top", which is either
// the original loop preheader (for the first iteration) or the previous
// iteration's exiting block (for every other iteration)
InsertTop->getTerminator()->setSuccessor(0, cast<BasicBlock>(VMap[Header]));
// Similarly, for the latch:
// The original exiting edge is still hooked up to the loop exit.
// The backedge now goes to the "bottom", which is either the loop's real
// header (for the last peeled iteration) or the copied header of the next
// iteration (for every other iteration)
BranchInst *LatchBR =
cast<BranchInst>(cast<BasicBlock>(VMap[Latch])->getTerminator());
unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
LatchBR->setSuccessor(HeaderIdx, InsertBot);
LatchBR->setSuccessor(1 - HeaderIdx, Exit);
// The new copy of the loop body starts with a bunch of PHI nodes
// that pick an incoming value from either the preheader, or the previous
// loop iteration. Since this copy is no longer part of the loop, we
// resolve this statically:
// For the first iteration, we use the value from the preheader directly.
// For any other iteration, we replace the phi with the value generated by
// the immediately preceding clone of the loop body (which represents
// the previous iteration).
for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
if (IterNumber == 0) {
VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader);
} else {
Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch);
Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
if (LatchInst && L->contains(LatchInst))
VMap[&*I] = LVMap[LatchInst];
else
VMap[&*I] = LatchVal;
}
cast<BasicBlock>(VMap[Header])->getInstList().erase(NewPHI);
}
// Fix up the outgoing values - we need to add a value for the iteration
// we've just created. Note that this must happen *after* the incoming
// values are adjusted, since the value going out of the latch may also be
// a value coming into the header.
for (BasicBlock::iterator I = Exit->begin(); isa<PHINode>(I); ++I) {
PHINode *PHI = cast<PHINode>(I);
Value *LatchVal = PHI->getIncomingValueForBlock(Latch);
Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
if (LatchInst && L->contains(LatchInst))
LatchVal = VMap[LatchVal];
PHI->addIncoming(LatchVal, cast<BasicBlock>(VMap[Latch]));
}
// LastValueMap is updated with the values for the current loop
// which are used the next time this function is called.
for (const auto &KV : VMap)
LVMap[KV.first] = KV.second;
}
/// \brief Peel off the first \p PeelCount iterations of loop \p L.
///
/// Note that this does not peel them off as a single straight-line block.
/// Rather, each iteration is peeled off separately, and needs to check the
/// exit condition.
/// For loops that dynamically execute \p PeelCount iterations or less
/// this provides a benefit, since the peeled off iterations, which account
/// for the bulk of dynamic execution, can be further simplified by scalar
/// optimizations.
bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI,
ScalarEvolution *SE, DominatorTree *DT,
bool PreserveLCSSA) {
if (!canPeel(L))
return false;
LoopBlocksDFS LoopBlocks(L);
LoopBlocks.perform(LI);
BasicBlock *Header = L->getHeader();
BasicBlock *PreHeader = L->getLoopPreheader();
BasicBlock *Latch = L->getLoopLatch();
BasicBlock *Exit = L->getUniqueExitBlock();
Function *F = Header->getParent();
// Set up all the necessary basic blocks. It is convenient to split the
// preheader into 3 parts - two blocks to anchor the peeled copy of the loop
// body, and a new preheader for the "real" loop.
// Peeling the first iteration transforms the loop:
//
// PreHeader:
// ...
// Header:
// LoopBody
// If (cond) goto Header
// Exit:
//
// into
//
// InsertTop:
// LoopBody
// If (!cond) goto Exit
// InsertBot:
// NewPreHeader:
// ...
// Header:
// LoopBody
// If (cond) goto Header
// Exit:
//
// Each following iteration will split the current bottom anchor in two,
// and put the new copy of the loop body between these two blocks. That is,
// after peeling another iteration from the example above, we'll split
// InsertBot, and get:
//
// InsertTop:
// LoopBody
// If (!cond) goto Exit
// InsertBot:
// LoopBody
// If (!cond) goto Exit
// InsertBot.next:
// NewPreHeader:
// ...
// Header:
// LoopBody
// If (cond) goto Header
// Exit:
BasicBlock *InsertTop = SplitEdge(PreHeader, Header, DT, LI);
BasicBlock *InsertBot =
SplitBlock(InsertTop, InsertTop->getTerminator(), DT, LI);
BasicBlock *NewPreHeader =
SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);
InsertTop->setName(Header->getName() + ".peel.begin");
InsertBot->setName(Header->getName() + ".peel.next");
NewPreHeader->setName(PreHeader->getName() + ".peel.newph");
ValueToValueMapTy LVMap;
// If we have branch weight information, we'll want to update it for the
// newly created branches.
BranchInst *LatchBR = cast<BranchInst>(Latch->getTerminator());
unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
uint64_t TrueWeight, FalseWeight;
uint64_t ExitWeight = 0, BackEdgeWeight = 0;
if (LatchBR->extractProfMetadata(TrueWeight, FalseWeight)) {
ExitWeight = HeaderIdx ? TrueWeight : FalseWeight;
BackEdgeWeight = HeaderIdx ? FalseWeight : TrueWeight;
}
// For each peeled-off iteration, make a copy of the loop.
for (unsigned Iter = 0; Iter < PeelCount; ++Iter) {
SmallVector<BasicBlock *, 8> NewBlocks;
ValueToValueMapTy VMap;
// The exit weight of the previous iteration is the header entry weight
// of the current iteration. So this is exactly how many dynamic iterations
// the current peeled-off static iteration uses up.
// FIXME: due to the way the distribution is constructed, we need a
// guard here to make sure we don't end up with non-positive weights.
if (ExitWeight < BackEdgeWeight)
BackEdgeWeight -= ExitWeight;
else
BackEdgeWeight = 1;
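// E.g., with hypothetical weights ExitWeight = 1000 and BackEdgeWeight =
// 3000 on the first peeled iteration, this leaves BackEdgeWeight = 2000
// for the remaining copies of the loop body.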
cloneLoopBlocks(L, Iter, InsertTop, InsertBot, Exit,
NewBlocks, LoopBlocks, VMap, LVMap, LI);
updateBranchWeights(InsertBot, cast<BranchInst>(VMap[LatchBR]), Iter,
PeelCount, ExitWeight);
InsertTop = InsertBot;
InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);
InsertBot->setName(Header->getName() + ".peel.next");
F->getBasicBlockList().splice(InsertTop->getIterator(),
F->getBasicBlockList(),
NewBlocks[0]->getIterator(), F->end());
// Remap to use values from the current iteration instead of the
// previous one.
remapInstructionsInBlocks(NewBlocks, VMap);
}
// Now adjust the phi nodes in the loop header to get their initial values
// from the last peeled-off iteration instead of the preheader.
for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
PHINode *PHI = cast<PHINode>(I);
Value *NewVal = PHI->getIncomingValueForBlock(Latch);
Instruction *LatchInst = dyn_cast<Instruction>(NewVal);
if (LatchInst && L->contains(LatchInst))
NewVal = LVMap[LatchInst];
PHI->setIncomingValue(PHI->getBasicBlockIndex(NewPreHeader), NewVal);
}
// Adjust the branch weights on the loop exit.
if (ExitWeight) {
MDBuilder MDB(LatchBR->getContext());
MDNode *WeightNode =
HeaderIdx ? MDB.createBranchWeights(ExitWeight, BackEdgeWeight)
: MDB.createBranchWeights(BackEdgeWeight, ExitWeight);
LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
}
// If the loop is nested, we changed the parent loop, update SE.
if (Loop *ParentLoop = L->getParentLoop())
SE->forgetLoop(ParentLoop);
NumPeeled++;
return true;
}

lib/Transforms/Utils/LoopUtils.cpp

@@ -1090,16 +1090,16 @@ Optional<unsigned> llvm::getLoopEstimatedTripCount(Loop *L) {
// from the raw counts to provide a better probability estimate. Remove
// the adjustment by subtracting 1 from both weights.
uint64_t TrueVal, FalseVal;
if (!LatchBR->extractProfMetadata(TrueVal, FalseVal) || (TrueVal <= 1) ||
(FalseVal <= 1))
if (!LatchBR->extractProfMetadata(TrueVal, FalseVal))
return None;
TrueVal -= 1;
FalseVal -= 1;
if (!TrueVal || !FalseVal)
return 0;
// Divide the count of the backedge by the count of the edge exiting the loop.
// Divide the count of the backedge by the count of the edge exiting the loop,
// rounding to nearest.
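// E.g., with a backedge count of 3000 and an exit count of 1000
// (hypothetical values), the estimate is (3000 + 1000 / 2) / 1000 = 3.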
if (LatchBR->getSuccessor(0) == L->getHeader())
return TrueVal / FalseVal;
return (TrueVal + (FalseVal / 2)) / FalseVal;
else
return FalseVal / TrueVal;
return (FalseVal + (TrueVal / 2)) / TrueVal;
}

test/Transforms/LoopUnroll/peel-loop-pgo.ll

@@ -0,0 +1,47 @@
; RUN: opt < %s -S -debug-only=loop-unroll -loop-unroll -unroll-allow-peeling 2>&1 | FileCheck %s
; REQUIRES: asserts
; Make sure we use the profile information correctly to peel-off 3 iterations
; from the loop, and update the branch weights for the peeled loop properly.
; CHECK: PEELING loop %for.body with iteration count 3!
; CHECK-LABEL: @basic
; CHECK: br i1 %{{.*}}, label %[[NEXT0:.*]], label %for.cond.for.end_crit_edge, !prof !1
; CHECK: [[NEXT0]]:
; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !2
; CHECK: [[NEXT1]]:
; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !3
; CHECK: [[NEXT2]]:
; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !4
define void @basic(i32* %p, i32 %k) #0 !prof !0 {
entry:
%cmp3 = icmp slt i32 0, %k
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body.lr.ph: ; preds = %entry
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.body
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
%incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
store i32 %i.05, i32* %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge, !prof !1
for.cond.for.end_crit_edge: ; preds = %for.body
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
!0 = !{!"function_entry_count", i64 1}
!1 = !{!"branch_weights", i32 3001, i32 1001}
; CHECK: !1 = !{!"branch_weights", i32 900, i32 101}
; CHECK: !2 = !{!"branch_weights", i32 540, i32 360}
; CHECK: !3 = !{!"branch_weights", i32 162, i32 378}
; CHECK: !4 = !{!"branch_weights", i32 560, i32 162}

test/Transforms/LoopUnroll/peel-loop.ll

@@ -0,0 +1,96 @@
; RUN: opt < %s -S -loop-unroll -unroll-force-peel-count=3 -simplifycfg -instcombine | FileCheck %s
; Basic loop peeling - check that we can peel-off the first 3 loop iterations
; when explicitly requested.
; CHECK-LABEL: @basic
; CHECK: %[[CMP0:.*]] = icmp sgt i32 %k, 0
; CHECK: br i1 %[[CMP0]], label %[[NEXT0:.*]], label %for.end
; CHECK: [[NEXT0]]:
; CHECK: store i32 0, i32* %p, align 4
; CHECK: %[[CMP1:.*]] = icmp eq i32 %k, 1
; CHECK: br i1 %[[CMP1]], label %for.end, label %[[NEXT1:.*]]
; CHECK: [[NEXT1]]:
; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, i32* %p, i64 1
; CHECK: store i32 1, i32* %[[INC1]], align 4
; CHECK: %[[CMP2:.*]] = icmp sgt i32 %k, 2
; CHECK: br i1 %[[CMP2]], label %[[NEXT2:.*]], label %for.end
; CHECK: [[NEXT2]]:
; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, i32* %p, i64 2
; CHECK: store i32 2, i32* %[[INC2]], align 4
; CHECK: %[[CMP3:.*]] = icmp eq i32 %k, 3
; CHECK: br i1 %[[CMP3]], label %for.end, label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: %[[IV:.*]] = phi i32 [ {{.*}}, %[[LOOP]] ], [ 3, %[[NEXT2]] ]
define void @basic(i32* %p, i32 %k) #0 {
entry:
%cmp3 = icmp slt i32 0, %k
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body.lr.ph: ; preds = %entry
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.body
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
%incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
store i32 %i.05, i32* %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.body
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
ret void
}
; Make sure peeling works correctly when a value defined in a loop is used
; in later code - we need to correctly plumb the phi depending on which
; iteration is actually used.
; CHECK-LABEL: @output
; CHECK: %[[CMP0:.*]] = icmp sgt i32 %k, 0
; CHECK: br i1 %[[CMP0]], label %[[NEXT0:.*]], label %for.end
; CHECK: [[NEXT0]]:
; CHECK: store i32 0, i32* %p, align 4
; CHECK: %[[CMP1:.*]] = icmp eq i32 %k, 1
; CHECK: br i1 %[[CMP1]], label %for.end, label %[[NEXT1:.*]]
; CHECK: [[NEXT1]]:
; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, i32* %p, i64 1
; CHECK: store i32 1, i32* %[[INC1]], align 4
; CHECK: %[[CMP2:.*]] = icmp sgt i32 %k, 2
; CHECK: br i1 %[[CMP2]], label %[[NEXT2:.*]], label %for.end
; CHECK: [[NEXT2]]:
; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, i32* %p, i64 2
; CHECK: store i32 2, i32* %[[INC2]], align 4
; CHECK: %[[CMP3:.*]] = icmp eq i32 %k, 3
; CHECK: br i1 %[[CMP3]], label %for.end, label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK: %[[IV:.*]] = phi i32 [ %[[IV:.*]], %[[LOOP]] ], [ 3, %[[NEXT2]] ]
; CHECK: %ret = phi i32 [ 0, %entry ], [ 1, %[[NEXT0]] ], [ 2, %[[NEXT1]] ], [ 3, %[[NEXT2]] ], [ %[[IV]], %[[LOOP]] ]
; CHECK: ret i32 %ret
define i32 @output(i32* %p, i32 %k) #0 {
entry:
%cmp3 = icmp slt i32 0, %k
br i1 %cmp3, label %for.body.lr.ph, label %for.end
for.body.lr.ph: ; preds = %entry
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.body
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
%incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
store i32 %i.05, i32* %p.addr.04, align 4
%inc = add nsw i32 %i.05, 1
%cmp = icmp slt i32 %inc, %k
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.body
br label %for.end
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
%ret = phi i32 [ 0, %entry], [ %inc, %for.cond.for.end_crit_edge ]
ret i32 %ret
}