Revert BCmp Loop Idiom recognition transform (PR43870)

As discussed in https://bugs.llvm.org/show_bug.cgi?id=43870,
this transform is missing a crucial legality check:
the old (non-countable) loop would return early upon the first mismatch,
but there is no such guarantee for bcmp/memcmp.
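
For illustration, here is the kind of source loop the pass matched, in C++
(same shape as the loops in the removed tests; the function name is made up):

  bool prefix_equal(const char *a, const char *b, size_t n) {
    for (size_t i = 0; i != n; ++i)
      if (a[i] != b[i])
        return false; // early exit: nothing past the first mismatch is read
    return true;
  }

The loop never reads beyond the first mismatching byte of either buffer,
whereas bcmp(a, b, n) / memcmp(a, b, n) may read all n bytes of both buffers,
so the rewrite can introduce reads (and faults) the original program never
performed.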

We'd need to ensure that [PtrA, PtrA+NBytes) and [PtrB, PtrB+NBytes)
are fully dereferenceable memory regions. But that would limit
the transform to constant loop trip counts and would further
cripple it because dereferenceability analysis is *very* partial.
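
A rough sketch of the extra legality check this would require (hedged: the
helper is the one declared in llvm/Analysis/Loads.h, but its exact signature
has shifted across LLVM versions, the wrapper name here is made up, and the
byte count must be a compile-time constant, which is precisely the limitation
described above):

  #include "llvm/Analysis/Loads.h"

  // Both [PtrA, PtrA+NBytes) and [PtrB, PtrB+NBytes) must be known to be
  // dereferenceable at the point where the bcmp/memcmp call would be emitted.
  // NBytes has to be a constant APInt, so non-constant trip counts are out.
  static bool buffersFullyDereferenceable(const llvm::Value *PtrA,
                                          const llvm::Value *PtrB,
                                          const llvm::APInt &NBytes,
                                          const llvm::DataLayout &DL,
                                          const llvm::Instruction *CtxI,
                                          const llvm::DominatorTree *DT) {
    return llvm::isDereferenceableAndAlignedPointer(PtrA, /*Align=*/1, NBytes,
                                                    DL, CtxI, DT) &&
           llvm::isDereferenceableAndAlignedPointer(PtrB, /*Align=*/1, NBytes,
                                                    DL, CtxI, DT);
  }

Note such a check only fires when NBytes folds to a constant, which is why the
transform would effectively be limited to constant loop trip counts.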

Furthermore, even if all that is done, every single test
would need to be rewritten from scratch.

So let's just give up.
Roman Lebedev 2019-11-02 12:39:02 +03:00
parent 79d8996d73
commit c4b757be02
6 changed files with 8 additions and 4028 deletions


@@ -81,9 +81,6 @@ Non-comprehensive list of changes in this release
Undefined Behaviour Sanitizer ``-fsanitize=pointer-overflow`` check
will now catch such cases.
* The Loop Idiom Recognition (``-loop-idiom``) pass has learned to recognize
the ``bcmp`` pattern and convert it into a call to the ``bcmp`` (or ``memcmp``)
function.
* Windows Control Flow Guard: the ``-cfguard`` option now emits CFG checks on
indirect function calls. The previous behavior is still available with the


@@ -41,7 +41,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -78,20 +77,16 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
@@ -107,7 +102,6 @@ using namespace llvm;
STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
STATISTIC(NumBCmp, "Number of memcmp's formed from loop 2xload+eq-compare");
static cl::opt<bool> UseLIRCodeSizeHeurs(
"use-lir-code-size-heurs",
@@ -117,26 +111,6 @@ static cl::opt<bool> UseLIRCodeSizeHeurs(
namespace {
// FIXME: reinventing the wheel much? Is there a cleaner solution?
struct PMAbstraction {
virtual void markLoopAsDeleted(Loop *L) = 0;
virtual ~PMAbstraction() = default;
};
struct LegacyPMAbstraction : PMAbstraction {
LPPassManager &LPM;
LegacyPMAbstraction(LPPassManager &LPM) : LPM(LPM) {}
virtual ~LegacyPMAbstraction() = default;
void markLoopAsDeleted(Loop *L) override { LPM.markLoopAsDeleted(*L); }
};
struct NewPMAbstraction : PMAbstraction {
LPMUpdater &Updater;
NewPMAbstraction(LPMUpdater &Updater) : Updater(Updater) {}
virtual ~NewPMAbstraction() = default;
void markLoopAsDeleted(Loop *L) override {
Updater.markLoopAsDeleted(*L, L->getName());
}
};
class LoopIdiomRecognize {
Loop *CurLoop = nullptr;
AliasAnalysis *AA;
@@ -146,7 +120,6 @@ class LoopIdiomRecognize {
TargetLibraryInfo *TLI;
const TargetTransformInfo *TTI;
const DataLayout *DL;
PMAbstraction &LoopDeleter;
OptimizationRemarkEmitter &ORE;
bool ApplyCodeSizeHeuristics;
@@ -155,10 +128,9 @@ public:
LoopInfo *LI, ScalarEvolution *SE,
TargetLibraryInfo *TLI,
const TargetTransformInfo *TTI,
const DataLayout *DL, PMAbstraction &LoopDeleter,
const DataLayout *DL,
OptimizationRemarkEmitter &ORE)
: AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL),
LoopDeleter(LoopDeleter), ORE(ORE) {}
: AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) {}
bool runOnLoop(Loop *L);
@@ -172,8 +144,6 @@ private:
bool HasMemset;
bool HasMemsetPattern;
bool HasMemcpy;
bool HasMemCmp;
bool HasBCmp;
/// Return code for isLegalStore()
enum LegalStoreKind {
@@ -216,32 +186,6 @@ private:
bool runOnNoncountableLoop();
struct CmpLoopStructure {
Value *BCmpValue, *LatchCmpValue;
BasicBlock *HeaderBrEqualBB, *HeaderBrUnequalBB;
BasicBlock *LatchBrFinishBB, *LatchBrContinueBB;
};
bool matchBCmpLoopStructure(CmpLoopStructure &CmpLoop) const;
struct CmpOfLoads {
ICmpInst::Predicate BCmpPred;
Value *LoadSrcA, *LoadSrcB;
Value *LoadA, *LoadB;
};
bool matchBCmpOfLoads(Value *BCmpValue, CmpOfLoads &CmpOfLoads) const;
bool recognizeBCmpLoopControlFlow(const CmpOfLoads &CmpOfLoads,
CmpLoopStructure &CmpLoop) const;
bool recognizeBCmpLoopSCEV(uint64_t BCmpTyBytes, CmpOfLoads &CmpOfLoads,
const SCEV *&SrcA, const SCEV *&SrcB,
const SCEV *&Iterations) const;
bool detectBCmpIdiom(ICmpInst *&BCmpInst, CmpInst *&LatchCmpInst,
LoadInst *&LoadA, LoadInst *&LoadB, const SCEV *&SrcA,
const SCEV *&SrcB, const SCEV *&NBytes) const;
BasicBlock *transformBCmpControlFlow(ICmpInst *ComparedEqual);
void transformLoopToBCmp(ICmpInst *BCmpInst, CmpInst *LatchCmpInst,
LoadInst *LoadA, LoadInst *LoadB, const SCEV *SrcA,
const SCEV *SrcB, const SCEV *NBytes);
bool recognizeBCmp();
bool recognizePopcount();
void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
PHINode *CntPhi, Value *Var);
@@ -279,14 +223,13 @@ public:
&getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
*L->getHeader()->getParent());
const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
LegacyPMAbstraction LoopDeleter(LPM);
// For the old PM, we can't use OptimizationRemarkEmitter as an analysis
// pass. Function analyses need to be preserved across loop transformations
// but ORE cannot be preserved (see comment before the pass definition).
OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL, LoopDeleter, ORE);
LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL, ORE);
return LIR.runOnLoop(L);
}
@@ -305,7 +248,7 @@ char LoopIdiomRecognizeLegacyPass::ID = 0;
PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR,
LPMUpdater &Updater) {
LPMUpdater &) {
const auto *DL = &L.getHeader()->getModule()->getDataLayout();
const auto &FAM =
@@ -319,9 +262,8 @@ PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
"LoopIdiomRecognizePass: OptimizationRemarkEmitterAnalysis not cached "
"at a higher level");
NewPMAbstraction LoopDeleter(Updater);
LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL,
LoopDeleter, *ORE);
*ORE);
if (!LIR.runOnLoop(&L))
return PreservedAnalyses::all();
@@ -358,8 +300,7 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L) {
// Disable loop idiom recognition if the function's name is a common idiom.
StringRef Name = L->getHeader()->getParent()->getName();
if (Name == "memset" || Name == "memcpy" || Name == "memcmp" ||
Name == "bcmp")
if (Name == "memset" || Name == "memcpy")
return false;
// Determine if code size heuristics need to be applied.
@@ -369,10 +310,8 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L) {
HasMemset = TLI->has(LibFunc_memset);
HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
HasMemcpy = TLI->has(LibFunc_memcpy);
HasMemCmp = TLI->has(LibFunc_memcmp);
HasBCmp = TLI->has(LibFunc_bcmp);
if (HasMemset || HasMemsetPattern || HasMemcpy || HasMemCmp || HasBCmp)
if (HasMemset || HasMemsetPattern || HasMemcpy)
if (SE->hasLoopInvariantBackedgeTakenCount(L))
return runOnCountableLoop();
@@ -1211,7 +1150,7 @@ bool LoopIdiomRecognize::runOnNoncountableLoop() {
<< "] Noncountable Loop %"
<< CurLoop->getHeader()->getName() << "\n");
return recognizeBCmp() || recognizePopcount() || recognizeAndInsertFFS();
return recognizePopcount() || recognizeAndInsertFFS();
}
/// Check if the given conditional branch is based on the comparison between
@@ -1885,811 +1824,3 @@ void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
// loop. The loop would otherwise not be deleted even if it becomes empty.
SE->forgetLoop(CurLoop);
}
bool LoopIdiomRecognize::matchBCmpLoopStructure(
CmpLoopStructure &CmpLoop) const {
ICmpInst::Predicate BCmpPred;
// We are looking for the following basic layout:
// PreheaderBB: <preheader> ; preds = ???
// <...>
// br label %LoopHeaderBB
// LoopHeaderBB: <header,exiting> ; preds = %PreheaderBB,%LoopLatchBB
// <...>
// %BCmpValue = icmp <...>
// br i1 %BCmpValue, label %LoopLatchBB, label %Successor0
// LoopLatchBB: <latch,exiting> ; preds = %LoopHeaderBB
// <...>
// %LatchCmpValue = <are we done, or do next iteration?>
// br i1 %LatchCmpValue, label %Successor1, label %LoopHeaderBB
// Successor0: <exit> ; preds = %LoopHeaderBB
// <...>
// Successor1: <exit> ; preds = %LoopLatchBB
// <...>
//
// Successor0 and Successor1 may or may not be the same basic block.
// Match basic frame-work of this supposedly-comparison loop.
using namespace PatternMatch;
if (!match(CurLoop->getHeader()->getTerminator(),
m_Br(m_CombineAnd(m_ICmp(BCmpPred, m_Value(), m_Value()),
m_Value(CmpLoop.BCmpValue)),
CmpLoop.HeaderBrEqualBB, CmpLoop.HeaderBrUnequalBB)) ||
!match(CurLoop->getLoopLatch()->getTerminator(),
m_Br(m_CombineAnd(m_Cmp(), m_Value(CmpLoop.LatchCmpValue)),
CmpLoop.LatchBrFinishBB, CmpLoop.LatchBrContinueBB))) {
LLVM_DEBUG(dbgs() << "Basic control-flow layout unrecognized.\n");
return false;
}
LLVM_DEBUG(dbgs() << "Recognized basic control-flow layout.\n");
return true;
}
bool LoopIdiomRecognize::matchBCmpOfLoads(Value *BCmpValue,
CmpOfLoads &CmpOfLoads) const {
using namespace PatternMatch;
LLVM_DEBUG(dbgs() << "Analyzing header icmp " << *BCmpValue
<< " as bcmp pattern.\n");
// Match bcmp-style loop header cmp. It must be an eq-icmp of loads. Example:
// %v0 = load <...>, <...>* %LoadSrcA
// %v1 = load <...>, <...>* %LoadSrcB
// %CmpLoop.BCmpValue = icmp eq <...> %v0, %v1
// There won't be any no-op bitcasts between load and icmp,
// they would have been transformed into a load of bitcast.
// FIXME: {b,mem}cmp() calls have the same semantics as icmp. Match them too.
if (!match(BCmpValue,
m_ICmp(CmpOfLoads.BCmpPred,
m_CombineAnd(m_Load(m_Value(CmpOfLoads.LoadSrcA)),
m_Value(CmpOfLoads.LoadA)),
m_CombineAnd(m_Load(m_Value(CmpOfLoads.LoadSrcB)),
m_Value(CmpOfLoads.LoadB)))) ||
!ICmpInst::isEquality(CmpOfLoads.BCmpPred)) {
LLVM_DEBUG(dbgs() << "Loop header icmp did not match bcmp pattern.\n");
return false;
}
LLVM_DEBUG(dbgs() << "Recognized header icmp as bcmp pattern with loads:\n\t"
<< *CmpOfLoads.LoadA << "\n\t" << *CmpOfLoads.LoadB
<< "\n");
// FIXME: handle memcmp pattern?
return true;
}
bool LoopIdiomRecognize::recognizeBCmpLoopControlFlow(
const CmpOfLoads &CmpOfLoads, CmpLoopStructure &CmpLoop) const {
BasicBlock *LoopHeaderBB = CurLoop->getHeader();
BasicBlock *LoopLatchBB = CurLoop->getLoopLatch();
// Be wary, comparisons can be inverted, canonicalize order.
// If this 'element' comparison passed, we expect to proceed to the next elt.
if (CmpOfLoads.BCmpPred != ICmpInst::Predicate::ICMP_EQ)
std::swap(CmpLoop.HeaderBrEqualBB, CmpLoop.HeaderBrUnequalBB);
// The predicate on loop latch does not matter, just canonicalize some order.
if (CmpLoop.LatchBrContinueBB != LoopHeaderBB)
std::swap(CmpLoop.LatchBrFinishBB, CmpLoop.LatchBrContinueBB);
SmallVector<BasicBlock *, 2> ExitBlocks;
CurLoop->getUniqueExitBlocks(ExitBlocks);
assert(ExitBlocks.size() <= 2U && "Can't have more than two exit blocks.");
// Check that control-flow between blocks is as expected.
if (CmpLoop.HeaderBrEqualBB != LoopLatchBB ||
CmpLoop.LatchBrContinueBB != LoopHeaderBB ||
!is_contained(ExitBlocks, CmpLoop.HeaderBrUnequalBB) ||
!is_contained(ExitBlocks, CmpLoop.LatchBrFinishBB)) {
LLVM_DEBUG(dbgs() << "Loop control-flow not recognized.\n");
return false;
}
assert(!is_contained(ExitBlocks, CmpLoop.HeaderBrEqualBB) &&
!is_contained(ExitBlocks, CmpLoop.LatchBrContinueBB) &&
"Unexpected exit edges.");
LLVM_DEBUG(dbgs() << "Recognized loop control-flow.\n");
LLVM_DEBUG(dbgs() << "Performing side-effect analysis on the loop.\n");
assert(CurLoop->isLCSSAForm(*DT) && "Should only get LCSSA-form loops here.");
// No loop instructions must be used outside of the loop. Since we are in
// LCSSA form, we only need to check successor block's PHI nodes's incoming
// values for incoming blocks that are the loop basic blocks.
for (const BasicBlock *ExitBB : ExitBlocks) {
for (const PHINode &PHI : ExitBB->phis()) {
for (const BasicBlock *LoopBB :
make_filter_range(PHI.blocks(), [this](BasicBlock *PredecessorBB) {
return CurLoop->contains(PredecessorBB);
})) {
const auto *I =
dyn_cast<Instruction>(PHI.getIncomingValueForBlock(LoopBB));
if (I && CurLoop->contains(I)) {
LLVM_DEBUG(dbgs()
<< "Loop contains instruction " << *I
<< " which is used outside of the loop in basic block "
<< ExitBB->getName() << " in phi node " << PHI << "\n");
return false;
}
}
}
}
// Similarly, the loop should not have any observable side-effects
// other than the final comparison result.
for (BasicBlock *LoopBB : CurLoop->blocks()) {
for (Instruction &I : *LoopBB) {
if (isa<DbgInfoIntrinsic>(I)) // Ignore dbginfo.
continue; // FIXME: anything else? lifetime info?
if ((I.mayHaveSideEffects() || I.isAtomic() || I.isFenceLike()) &&
&I != CmpOfLoads.LoadA && &I != CmpOfLoads.LoadB) {
LLVM_DEBUG(
dbgs() << "Loop contains instruction with potential side-effects: "
<< I << "\n");
return false;
}
}
}
LLVM_DEBUG(dbgs() << "No loop instructions deemed to have side-effects.\n");
return true;
}
bool LoopIdiomRecognize::recognizeBCmpLoopSCEV(uint64_t BCmpTyBytes,
CmpOfLoads &CmpOfLoads,
const SCEV *&SrcA,
const SCEV *&SrcB,
const SCEV *&Iterations) const {
// Try to compute SCEV of the loads, for this loop's scope.
const auto *ScevForSrcA = dyn_cast<SCEVAddRecExpr>(
SE->getSCEVAtScope(CmpOfLoads.LoadSrcA, CurLoop));
const auto *ScevForSrcB = dyn_cast<SCEVAddRecExpr>(
SE->getSCEVAtScope(CmpOfLoads.LoadSrcB, CurLoop));
if (!ScevForSrcA || !ScevForSrcB) {
LLVM_DEBUG(dbgs() << "Failed to get SCEV expressions for load sources.\n");
return false;
}
LLVM_DEBUG(dbgs() << "Got SCEV expressions (at loop scope) for loads:\n\t"
<< *ScevForSrcA << "\n\t" << *ScevForSrcB << "\n");
// Loads must have following SCEV exprs: {%ptr,+,BCmpTyBytes}<%LoopHeaderBB>
const SCEV *RecStepForA = ScevForSrcA->getStepRecurrence(*SE);
const SCEV *RecStepForB = ScevForSrcB->getStepRecurrence(*SE);
if (!ScevForSrcA->isAffine() || !ScevForSrcB->isAffine() ||
ScevForSrcA->getLoop() != CurLoop || ScevForSrcB->getLoop() != CurLoop ||
RecStepForA != RecStepForB || !isa<SCEVConstant>(RecStepForA) ||
cast<SCEVConstant>(RecStepForA)->getAPInt() != BCmpTyBytes) {
LLVM_DEBUG(dbgs() << "Unsupported SCEV expressions for loads. Only support "
"affine SCEV expressions originating in the loop we "
"are analysing with identical constant positive step, "
"equal to the count of bytes compared. Got:\n\t"
<< *RecStepForA << "\n\t" << *RecStepForB << "\n");
return false;
// FIXME: can support BCmpTyBytes > Step.
// But will need to account for the extra bytes compared at the end.
}
SrcA = ScevForSrcA->getStart();
SrcB = ScevForSrcB->getStart();
LLVM_DEBUG(dbgs() << "Got SCEV expressions for load sources:\n\t" << *SrcA
<< "\n\t" << *SrcB << "\n");
// The load sources must be loop-invariant and dominate the loop header.
if (SrcA == SE->getCouldNotCompute() || SrcB == SE->getCouldNotCompute() ||
!SE->isAvailableAtLoopEntry(SrcA, CurLoop) ||
!SE->isAvailableAtLoopEntry(SrcB, CurLoop)) {
LLVM_DEBUG(dbgs() << "Unsupported SCEV expressions for loads, unavaliable "
"prior to loop header.\n");
return false;
}
LLVM_DEBUG(dbgs() << "SCEV expressions for loads are acceptable.\n");
// bcmp / memcmp take length argument as size_t, so let's conservatively
// assume that the iteration count should be not wider than that.
Type *CmpFuncSizeTy = DL->getIntPtrType(SE->getContext());
// For how many iterations is loop guaranteed not to exit via LoopLatch?
// This is one less than the maximal number of comparisons, and is: n + -1
const SCEV *LoopExitCount =
SE->getExitCount(CurLoop, CurLoop->getLoopLatch());
LLVM_DEBUG(dbgs() << "Got SCEV expression for loop latch exit count: "
<< *LoopExitCount << "\n");
// Exit count, similarly, must be loop-invariant and dominate the loop header.
if (LoopExitCount == SE->getCouldNotCompute() ||
!LoopExitCount->getType()->isIntOrPtrTy() ||
LoopExitCount->getType()->getScalarSizeInBits() >
CmpFuncSizeTy->getScalarSizeInBits() ||
!SE->isAvailableAtLoopEntry(LoopExitCount, CurLoop)) {
LLVM_DEBUG(dbgs() << "Unsupported SCEV expression for loop latch exit.\n");
return false;
}
// LoopExitCount is always one less than the actual count of iterations.
// Do this before cast, else we will be stuck with 1 + zext(-1 + n)
Iterations = SE->getAddExpr(
LoopExitCount, SE->getOne(LoopExitCount->getType()), SCEV::FlagNUW);
assert(Iterations != SE->getCouldNotCompute() &&
"Shouldn't fail to increment by one.");
LLVM_DEBUG(dbgs() << "Computed iteration count: " << *Iterations << "\n");
return true;
}
/// Return true iff the bcmp idiom is detected in the loop.
///
/// Additionally:
/// 1) \p BCmpInst is set to the root byte-comparison instruction.
/// 2) \p LatchCmpInst is set to the comparison that controls the latch.
/// 3) \p LoadA is set to the first LoadInst.
/// 4) \p LoadB is set to the second LoadInst.
/// 5) \p SrcA is set to the first source location that is being compared.
/// 6) \p SrcB is set to the second source location that is being compared.
/// 7) \p NBytes is set to the number of bytes to compare.
bool LoopIdiomRecognize::detectBCmpIdiom(ICmpInst *&BCmpInst,
CmpInst *&LatchCmpInst,
LoadInst *&LoadA, LoadInst *&LoadB,
const SCEV *&SrcA, const SCEV *&SrcB,
const SCEV *&NBytes) const {
LLVM_DEBUG(dbgs() << "Recognizing bcmp idiom\n");
// Give up if the loop is not in normal form, or has more than 2 blocks.
if (!CurLoop->isLoopSimplifyForm() || CurLoop->getNumBlocks() > 2) {
LLVM_DEBUG(dbgs() << "Basic loop structure unrecognized.\n");
return false;
}
LLVM_DEBUG(dbgs() << "Recognized basic loop structure.\n");
CmpLoopStructure CmpLoop;
if (!matchBCmpLoopStructure(CmpLoop))
return false;
CmpOfLoads CmpOfLoads;
if (!matchBCmpOfLoads(CmpLoop.BCmpValue, CmpOfLoads))
return false;
if (!recognizeBCmpLoopControlFlow(CmpOfLoads, CmpLoop))
return false;
BCmpInst = cast<ICmpInst>(CmpLoop.BCmpValue); // FIXME: is there no
LatchCmpInst = cast<CmpInst>(CmpLoop.LatchCmpValue); // way to combine
LoadA = cast<LoadInst>(CmpOfLoads.LoadA); // these cast with
LoadB = cast<LoadInst>(CmpOfLoads.LoadB); // m_Value() matcher?
Type *BCmpValTy = BCmpInst->getOperand(0)->getType();
LLVMContext &Context = BCmpValTy->getContext();
uint64_t BCmpTyBits = DL->getTypeSizeInBits(BCmpValTy);
static constexpr uint64_t ByteTyBits = 8;
LLVM_DEBUG(dbgs() << "Got comparison between values of type " << *BCmpValTy
<< " of size " << BCmpTyBits
<< " bits (while byte = " << ByteTyBits << " bits).\n");
// bcmp()/memcmp() minimal unit of work is a byte. Therefore we must check
// that we are dealing with a multiple of a byte here.
if (BCmpTyBits % ByteTyBits != 0) {
LLVM_DEBUG(dbgs() << "Value size is not a multiple of byte.\n");
return false;
// FIXME: could still be done under a run-time check that the total bit
// count is a multiple of a byte, I guess? Or handle remainder separately?
}
// Each comparison is done on this many bytes.
uint64_t BCmpTyBytes = BCmpTyBits / ByteTyBits;
LLVM_DEBUG(dbgs() << "Size is exactly " << BCmpTyBytes
<< " bytes, eligible for bcmp conversion.\n");
const SCEV *Iterations;
if (!recognizeBCmpLoopSCEV(BCmpTyBytes, CmpOfLoads, SrcA, SrcB, Iterations))
return false;
// bcmp / memcmp take length argument as size_t, do promotion now.
Type *CmpFuncSizeTy = DL->getIntPtrType(Context);
Iterations = SE->getNoopOrZeroExtend(Iterations, CmpFuncSizeTy);
assert(Iterations != SE->getCouldNotCompute() && "Promotion failed.");
// Note that it didn't do the ptrtoint cast; we will need to do it manually.
// We will be comparing *bytes*, not BCmpTy, so we need to recalculate size.
// It's a multiplication, and it *could* overflow. But for it to overflow
// we'd want to compare more bytes than could be represented by size_t. But
// allocation functions also take size_t. So how'd you produce such a buffer?
// FIXME: we likely need to actually check that we know this won't overflow,
// via llvm::computeOverflowForUnsignedMul().
NBytes = SE->getMulExpr(
Iterations, SE->getConstant(CmpFuncSizeTy, BCmpTyBytes), SCEV::FlagNUW);
assert(NBytes != SE->getCouldNotCompute() &&
"Shouldn't fail to increment by one.");
LLVM_DEBUG(dbgs() << "Computed total byte count: " << *NBytes << "\n");
if (LoadA->getPointerAddressSpace() != LoadB->getPointerAddressSpace() ||
LoadA->getPointerAddressSpace() != 0 || !LoadA->isSimple() ||
!LoadB->isSimple()) {
StringLiteral L("Unsupported loads in idiom - only support identical, "
"simple loads from address space 0.\n");
LLVM_DEBUG(dbgs() << L);
ORE.emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "BCmpIdiomUnsupportedLoads",
BCmpInst->getDebugLoc(),
CurLoop->getHeader())
<< L;
});
return false; // FIXME: support non-simple loads.
}
LLVM_DEBUG(dbgs() << "Recognized bcmp idiom\n");
ORE.emit([&]() {
return OptimizationRemarkAnalysis(DEBUG_TYPE, "RecognizedBCmpIdiom",
CurLoop->getStartLoc(),
CurLoop->getHeader())
<< "Loop recognized as a bcmp idiom";
});
return true;
}
BasicBlock *
LoopIdiomRecognize::transformBCmpControlFlow(ICmpInst *ComparedEqual) {
LLVM_DEBUG(dbgs() << "Transforming control-flow.\n");
SmallVector<DominatorTree::UpdateType, 8> DTUpdates;
BasicBlock *PreheaderBB = CurLoop->getLoopPreheader();
BasicBlock *HeaderBB = CurLoop->getHeader();
BasicBlock *LoopLatchBB = CurLoop->getLoopLatch();
SmallString<32> LoopName = CurLoop->getName();
Function *Func = PreheaderBB->getParent();
LLVMContext &Context = Func->getContext();
// Before doing anything, drop SCEV info.
SE->forgetLoop(CurLoop);
// Here we start with: (0/6)
// PreheaderBB: <preheader> ; preds = ???
// <...>
// %memcmp = call i32 @memcmp(i8* %LoadSrcA, i8* %LoadSrcB, i64 %Nbytes)
// %ComparedEqual = icmp eq <...> %memcmp, 0
// br label %LoopHeaderBB
// LoopHeaderBB: <header,exiting> ; preds = %PreheaderBB,%LoopLatchBB
// <...>
// br i1 %<...>, label %LoopLatchBB, label %Successor0BB
// LoopLatchBB: <latch,exiting> ; preds = %LoopHeaderBB
// <...>
// br i1 %<...>, label %Successor1BB, label %LoopHeaderBB
// Successor0BB: <exit> ; preds = %LoopHeaderBB
// %S0PHI = phi <...> [ <...>, %LoopHeaderBB ]
// <...>
// Successor1BB: <exit> ; preds = %LoopLatchBB
// %S1PHI = phi <...> [ <...>, %LoopLatchBB ]
// <...>
//
// Successor0 and Successor1 may or may not be the same basic block.
// Decouple the edge between loop preheader basic block and loop header basic
// block. Thus the loop has become unreachable.
assert(cast<BranchInst>(PreheaderBB->getTerminator())->isUnconditional() &&
PreheaderBB->getTerminator()->getSuccessor(0) == HeaderBB &&
"Preheader bb must end with an unconditional branch to header bb.");
PreheaderBB->getTerminator()->eraseFromParent();
DTUpdates.push_back({DominatorTree::Delete, PreheaderBB, HeaderBB});
// Create a new preheader basic block before loop header basic block.
auto *PhonyPreheaderBB = BasicBlock::Create(
Context, LoopName + ".phonypreheaderbb", Func, HeaderBB);
// And insert an unconditional branch from phony preheader basic block to
// loop header basic block.
IRBuilder<>(PhonyPreheaderBB).CreateBr(HeaderBB);
DTUpdates.push_back({DominatorTree::Insert, PhonyPreheaderBB, HeaderBB});
// Create a *single* new empty block that we will substitute as a
// successor basic block for the loop's exits. This one is temporary.
// Much like phony preheader basic block, it is not connected.
auto *PhonySuccessorBB =
BasicBlock::Create(Context, LoopName + ".phonysuccessorbb", Func,
LoopLatchBB->getNextNode());
// That block must have *some* non-PHI instruction, or else deleteDeadLoop()
// will mess up cleanup of dbginfo, and verifier will complain.
IRBuilder<>(PhonySuccessorBB).CreateUnreachable();
// Create two new empty blocks that we will use to preserve the original
// loop exit control-flow, and preserve the incoming values in the PHI nodes
// in the loop's successor exit blocks. These will live on.
auto *ComparedUnequalBB =
BasicBlock::Create(Context, ComparedEqual->getName() + ".unequalbb", Func,
PhonySuccessorBB->getNextNode());
auto *ComparedEqualBB =
BasicBlock::Create(Context, ComparedEqual->getName() + ".equalbb", Func,
PhonySuccessorBB->getNextNode());
// By now we have: (1/6)
// PreheaderBB: ; preds = ???
// <...>
// %memcmp = call i32 @memcmp(i8* %LoadSrcA, i8* %LoadSrcB, i64 %Nbytes)
// %ComparedEqual = icmp eq <...> %memcmp, 0
// [no terminator instruction!]
// PhonyPreheaderBB: <preheader> ; No preds, UNREACHABLE!
// br label %LoopHeaderBB
// LoopHeaderBB: <header,exiting> ; preds = %PhonyPreheaderBB, %LoopLatchBB
// <...>
// br i1 %<...>, label %LoopLatchBB, label %Successor0BB
// LoopLatchBB: <latch,exiting> ; preds = %LoopHeaderBB
// <...>
// br i1 %<...>, label %Successor1BB, label %LoopHeaderBB
// PhonySuccessorBB: ; No preds, UNREACHABLE!
// unreachable
// EqualBB: ; No preds, UNREACHABLE!
// [no terminator instruction!]
// UnequalBB: ; No preds, UNREACHABLE!
// [no terminator instruction!]
// Successor0BB: <exit> ; preds = %LoopHeaderBB
// %S0PHI = phi <...> [ <...>, %LoopHeaderBB ]
// <...>
// Successor1BB: <exit> ; preds = %LoopLatchBB
// %S1PHI = phi <...> [ <...>, %LoopLatchBB ]
// <...>
// What is the mapping/replacement basic block for exiting out of the loop
// from either of the old loop's basic blocks?
auto GetReplacementBB = [this, ComparedEqualBB,
ComparedUnequalBB](const BasicBlock *OldBB) {
assert(CurLoop->contains(OldBB) && "Only for loop's basic blocks.");
if (OldBB == CurLoop->getLoopLatch()) // "all elements compared equal".
return ComparedEqualBB;
if (OldBB == CurLoop->getHeader()) // "element compared unequal".
return ComparedUnequalBB;
llvm_unreachable("Only had two basic blocks in loop.");
};
// What are the exits out of this loop?
SmallVector<Loop::Edge, 2> LoopExitEdges;
CurLoop->getExitEdges(LoopExitEdges);
assert(LoopExitEdges.size() == 2 && "Should have only two exit edges.");
// Populate new basic blocks, update the exiting control-flow, PHI nodes.
for (const Loop::Edge &Edge : LoopExitEdges) {
auto *OldLoopBB = const_cast<BasicBlock *>(Edge.first);
auto *SuccessorBB = const_cast<BasicBlock *>(Edge.second);
assert(CurLoop->contains(OldLoopBB) && !CurLoop->contains(SuccessorBB) &&
"Unexpected edge.");
// If we would exit the loop from this loop's basic block,
// what semantically would that mean? Did comparison succeed or fail?
BasicBlock *NewBB = GetReplacementBB(OldLoopBB);
assert(NewBB->empty() && "Should not get same new basic block here twice.");
IRBuilder<> Builder(NewBB);
Builder.SetCurrentDebugLocation(OldLoopBB->getTerminator()->getDebugLoc());
Builder.CreateBr(SuccessorBB);
DTUpdates.push_back({DominatorTree::Insert, NewBB, SuccessorBB});
// Also, be *REALLY* careful with PHI nodes in successor basic block,
// update them to receive the same input value, but not from current loop's
// basic block, but from new basic block instead.
SuccessorBB->replacePhiUsesWith(OldLoopBB, NewBB);
// Also, change loop control-flow. This loop's basic block shall no longer
// exit from the loop to its original successor basic block, but to our new
// phony successor basic block. Note that the new successor will be the unique exit.
OldLoopBB->getTerminator()->replaceSuccessorWith(SuccessorBB,
PhonySuccessorBB);
DTUpdates.push_back({DominatorTree::Delete, OldLoopBB, SuccessorBB});
DTUpdates.push_back({DominatorTree::Insert, OldLoopBB, PhonySuccessorBB});
}
// Inform DomTree about edge changes. Note that LoopInfo is still out-of-date.
assert(DTUpdates.size() == 8 && "Update count prediction failed.");
DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
DTU.applyUpdates(DTUpdates);
DTUpdates.clear();
// By now we have: (2/6)
// PreheaderBB: ; preds = ???
// <...>
// %memcmp = call i32 @memcmp(i8* %LoadSrcA, i8* %LoadSrcB, i64 %Nbytes)
// %ComparedEqual = icmp eq <...> %memcmp, 0
// [no terminator instruction!]
// PhonyPreheaderBB: <preheader> ; No preds, UNREACHABLE!
// br label %LoopHeaderBB
// LoopHeaderBB: <header,exiting> ; preds = %PhonyPreheaderBB, %LoopLatchBB
// <...>
// br i1 %<...>, label %LoopLatchBB, label %PhonySuccessorBB
// LoopLatchBB: <latch,exiting> ; preds = %LoopHeaderBB
// <...>
// br i1 %<...>, label %PhonySuccessorBB, label %LoopHeaderBB
// PhonySuccessorBB: <uniq. exit> ; preds = %LoopHeaderBB, %LoopLatchBB
// unreachable
// EqualBB: ; No preds, UNREACHABLE!
// br label %Successor1BB
// UnequalBB: ; No preds, UNREACHABLE!
// br label %Successor0BB
// Successor0BB: ; preds = %UnequalBB
// %S0PHI = phi <...> [ <...>, %UnequalBB ]
// <...>
// Successor1BB: ; preds = %EqualBB
// %S0PHI = phi <...> [ <...>, %EqualBB ]
// <...>
// *Finally*, zap the original loop. Record its parent loop though.
Loop *ParentLoop = CurLoop->getParentLoop();
LLVM_DEBUG(dbgs() << "Deleting old loop.\n");
LoopDeleter.markLoopAsDeleted(CurLoop); // Mark as deleted *BEFORE* deleting!
deleteDeadLoop(CurLoop, DT, SE, LI); // And actually delete the loop.
CurLoop = nullptr;
// By now we have: (3/6)
// PreheaderBB: ; preds = ???
// <...>
// %memcmp = call i32 @memcmp(i8* %LoadSrcA, i8* %LoadSrcB, i64 %Nbytes)
// %ComparedEqual = icmp eq <...> %memcmp, 0
// [no terminator instruction!]
// PhonyPreheaderBB: ; No preds, UNREACHABLE!
// br label %PhonySuccessorBB
// PhonySuccessorBB: ; preds = %PhonyPreheaderBB
// unreachable
// EqualBB: ; No preds, UNREACHABLE!
// br label %Successor1BB
// UnequalBB: ; No preds, UNREACHABLE!
// br label %Successor0BB
// Successor0BB: ; preds = %UnequalBB
// %S0PHI = phi <...> [ <...>, %UnequalBB ]
// <...>
// Successor1BB: ; preds = %EqualBB
// %S0PHI = phi <...> [ <...>, %EqualBB ]
// <...>
// Now, actually restore the CFG.
// Insert an unconditional branch from an actual preheader basic block to
// phony preheader basic block.
IRBuilder<>(PreheaderBB).CreateBr(PhonyPreheaderBB);
DTUpdates.push_back({DominatorTree::Insert, PhonyPreheaderBB, HeaderBB});
// Insert proper conditional branch from phony successor basic block to the
// "dispatch" basic blocks, which were used to preserve incoming values in
// original loop's successor basic blocks.
assert(isa<UnreachableInst>(PhonySuccessorBB->getTerminator()) &&
"Yep, that's the one we created to keep deleteDeadLoop() happy.");
PhonySuccessorBB->getTerminator()->eraseFromParent();
{
IRBuilder<> Builder(PhonySuccessorBB);
Builder.SetCurrentDebugLocation(ComparedEqual->getDebugLoc());
Builder.CreateCondBr(ComparedEqual, ComparedEqualBB, ComparedUnequalBB);
}
DTUpdates.push_back(
{DominatorTree::Insert, PhonySuccessorBB, ComparedEqualBB});
DTUpdates.push_back(
{DominatorTree::Insert, PhonySuccessorBB, ComparedUnequalBB});
BasicBlock *DispatchBB = PhonySuccessorBB;
DispatchBB->setName(LoopName + ".bcmpdispatchbb");
assert(DTUpdates.size() == 3 && "Update count prediction failed.");
DTU.applyUpdates(DTUpdates);
DTUpdates.clear();
// By now we have: (4/6)
// PreheaderBB: ; preds = ???
// <...>
// %memcmp = call i32 @memcmp(i8* %LoadSrcA, i8* %LoadSrcB, i64 %Nbytes)
// %ComparedEqual = icmp eq <...> %memcmp, 0
// br label %PhonyPreheaderBB
// PhonyPreheaderBB: ; preds = %PreheaderBB
// br label %DispatchBB
// DispatchBB: ; preds = %PhonyPreheaderBB
// br i1 %ComparedEqual, label %EqualBB, label %UnequalBB
// EqualBB: ; preds = %DispatchBB
// br label %Successor1BB
// UnequalBB: ; preds = %DispatchBB
// br label %Successor0BB
// Successor0BB: ; preds = %UnequalBB
// %S0PHI = phi <...> [ <...>, %UnequalBB ]
// <...>
// Successor1BB: ; preds = %EqualBB
// %S0PHI = phi <...> [ <...>, %EqualBB ]
// <...>
// The basic CFG has been restored! Now let's merge redundant basic blocks.
// Merge phony successor basic block into its only predecessor,
// phony preheader basic block. It is completely redundant.
MergeBasicBlockIntoOnlyPred(DispatchBB, &DTU);
// By now we have: (5/6)
// PreheaderBB: ; preds = ???
// <...>
// %memcmp = call i32 @memcmp(i8* %LoadSrcA, i8* %LoadSrcB, i64 %Nbytes)
// %ComparedEqual = icmp eq <...> %memcmp, 0
// br label %DispatchBB
// DispatchBB: ; preds = %PreheaderBB
// br i1 %ComparedEqual, label %EqualBB, label %UnequalBB
// EqualBB: ; preds = %DispatchBB
// br label %Successor1BB
// UnequalBB: ; preds = %DispatchBB
// br label %Successor0BB
// Successor0BB: ; preds = %UnequalBB
// %S0PHI = phi <...> [ <...>, %UnequalBB ]
// <...>
// Successor1BB: ; preds = %EqualBB
// %S0PHI = phi <...> [ <...>, %EqualBB ]
// <...>
// Was this loop nested?
if (!ParentLoop) {
// If the loop was *NOT* nested, then let's also merge phony successor
// basic block into its only predecessor, preheader basic block.
// Also, here we need to update LoopInfo.
LI->removeBlock(PreheaderBB);
MergeBasicBlockIntoOnlyPred(DispatchBB, &DTU);
// By now we have: (6/6)
// DispatchBB: ; preds = ???
// <...>
// %memcmp = call i32 @memcmp(i8* %LoadSrcA, i8* %LoadSrcB, i64 %Nbytes)
// %ComparedEqual = icmp eq <...> %memcmp, 0
// br i1 %ComparedEqual, label %EqualBB, label %UnequalBB
// EqualBB: ; preds = %DispatchBB
// br label %Successor1BB
// UnequalBB: ; preds = %DispatchBB
// br label %Successor0BB
// Successor0BB: ; preds = %UnequalBB
// %S0PHI = phi <...> [ <...>, %UnequalBB ]
// <...>
// Successor1BB: ; preds = %EqualBB
// %S0PHI = phi <...> [ <...>, %EqualBB ]
// <...>
return DispatchBB;
}
// Otherwise, we need to "preserve" the LoopSimplify form of the deleted loop.
// To achieve that, we shall keep the preheader basic block (mainly so that
// the loop header block will be guaranteed to have a predecessor outside of
// the loop), and create a phony loop with all three of these new basic blocks.
Loop *PhonyLoop = LI->AllocateLoop();
ParentLoop->addChildLoop(PhonyLoop);
PhonyLoop->addBasicBlockToLoop(DispatchBB, *LI);
PhonyLoop->addBasicBlockToLoop(ComparedEqualBB, *LI);
PhonyLoop->addBasicBlockToLoop(ComparedUnequalBB, *LI);
// But we only have a preheader basic block, a header basic block and
// two exiting basic blocks. For a proper loop we also need a backedge from
// a non-header basic block to the header basic block.
// Let's just add a never-taken branch from both of the exiting basic blocks.
for (BasicBlock *BB : {ComparedEqualBB, ComparedUnequalBB}) {
BranchInst *OldTerminator = cast<BranchInst>(BB->getTerminator());
assert(OldTerminator->isUnconditional() && "That's the one we created.");
BasicBlock *SuccessorBB = OldTerminator->getSuccessor(0);
IRBuilder<> Builder(OldTerminator);
Builder.SetCurrentDebugLocation(OldTerminator->getDebugLoc());
Builder.CreateCondBr(ConstantInt::getTrue(Context), SuccessorBB,
DispatchBB);
OldTerminator->eraseFromParent();
// Yes, the backedge will never be taken. The control-flow is redundant.
// If it can be simplified further, other passes will take care of it.
DTUpdates.push_back({DominatorTree::Delete, BB, SuccessorBB});
DTUpdates.push_back({DominatorTree::Insert, BB, SuccessorBB});
DTUpdates.push_back({DominatorTree::Insert, BB, DispatchBB});
}
assert(DTUpdates.size() == 6 && "Update count prediction failed.");
DTU.applyUpdates(DTUpdates);
DTUpdates.clear();
// By now we have: (6/6)
// PreheaderBB: <preheader> ; preds = ???
// <...>
// %memcmp = call i32 @memcmp(i8* %LoadSrcA, i8* %LoadSrcB, i64 %Nbytes)
// %ComparedEqual = icmp eq <...> %memcmp, 0
// br label %BCmpDispatchBB
// BCmpDispatchBB: <header> ; preds = %PreheaderBB
// br i1 %ComparedEqual, label %EqualBB, label %UnequalBB
// EqualBB: <latch,exiting> ; preds = %BCmpDispatchBB
// br i1 %true, label %Successor1BB, label %BCmpDispatchBB
// UnequalBB: <latch,exiting> ; preds = %BCmpDispatchBB
// br i1 %true, label %Successor0BB, label %BCmpDispatchBB
// Successor0BB: ; preds = %UnequalBB
// %S0PHI = phi <...> [ <...>, %UnequalBB ]
// <...>
// Successor1BB: ; preds = %EqualBB
// %S0PHI = phi <...> [ <...>, %EqualBB ]
// <...>
// Finally fully DONE!
return DispatchBB;
}
void LoopIdiomRecognize::transformLoopToBCmp(ICmpInst *BCmpInst,
CmpInst *LatchCmpInst,
LoadInst *LoadA, LoadInst *LoadB,
const SCEV *SrcA, const SCEV *SrcB,
const SCEV *NBytes) {
// We will be inserting before the terminator instruction of preheader block.
IRBuilder<> Builder(CurLoop->getLoopPreheader()->getTerminator());
LLVM_DEBUG(dbgs() << "Transforming bcmp loop idiom into a call.\n");
LLVM_DEBUG(dbgs() << "Emitting new instructions.\n");
// Expand the SCEV expressions for both sources to compare, and produce a value
// for the byte len (beware of Iterations potentially being a pointer, and
// account for element size being BCmpTyBytes bytes, which may not be 1 byte).
Value *PtrA, *PtrB, *Len;
{
SCEVExpander SExp(*SE, *DL, "LoopToBCmp");
SExp.setInsertPoint(&*Builder.GetInsertPoint());
auto HandlePtr = [&SExp](LoadInst *Load, const SCEV *Src) {
SExp.SetCurrentDebugLocation(DebugLoc());
// If the pointer operand of the original load had a dbgloc, use it.
if (const auto *I = dyn_cast<Instruction>(Load->getPointerOperand()))
SExp.SetCurrentDebugLocation(I->getDebugLoc());
return SExp.expandCodeFor(Src);
};
PtrA = HandlePtr(LoadA, SrcA);
PtrB = HandlePtr(LoadB, SrcB);
// For len calculation let's use dbgloc for the loop's latch condition.
Builder.SetCurrentDebugLocation(LatchCmpInst->getDebugLoc());
SExp.SetCurrentDebugLocation(LatchCmpInst->getDebugLoc());
Len = SExp.expandCodeFor(NBytes);
Type *CmpFuncSizeTy = DL->getIntPtrType(Builder.getContext());
assert(SE->getTypeSizeInBits(Len->getType()) ==
DL->getTypeSizeInBits(CmpFuncSizeTy) &&
"Len should already have the correct size.");
// Make sure that the byte count is an integer, insert ptrtoint cast if not.
if (Len->getType()->isPointerTy())
Len = Builder.CreatePtrToInt(Len, CmpFuncSizeTy);
assert(Len->getType() == CmpFuncSizeTy && "Should have correct type now.");
Len->setName(Len->getName() + ".bytecount");
// There is no legality check needed. We want to compare that the memory
// regions [PtrA, PtrA+Len) and [PtrB, PtrB+Len) are fully identical, equal.
// For them to be fully equal, they must match bit-by-bit. And likewise,
// for them to *NOT* be fully equal, they have to differ just by one bit.
// The step of comparison (bits compared at once) simply does not matter.
}
// For the rest of new instructions, dbgloc should point at the value cmp.
Builder.SetCurrentDebugLocation(BCmpInst->getDebugLoc());
// Emit the comparison itself.
auto *CmpCall =
cast<CallInst>(HasBCmp ? emitBCmp(PtrA, PtrB, Len, Builder, *DL, TLI)
: emitMemCmp(PtrA, PtrB, Len, Builder, *DL, TLI));
// FIXME: add {B,Mem}CmpInst with MemoryCompareInst
// (based on MemIntrinsicBase) as base?
// FIXME: propagate metadata from loads? (alignments, AS, TBAA, ...)
// {b,mem}cmp returned 0 if they were equal, or non-zero if not equal.
auto *ComparedEqual = cast<ICmpInst>(Builder.CreateICmpEQ(
CmpCall, ConstantInt::get(CmpCall->getType(), 0),
PtrA->getName() + ".vs." + PtrB->getName() + ".eqcmp"));
BasicBlock *BB = transformBCmpControlFlow(ComparedEqual);
Builder.ClearInsertionPoint();
// We're done.
LLVM_DEBUG(dbgs() << "Transformed loop bcmp idiom into a call.\n");
ORE.emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "TransformedBCmpIdiomToCall",
CmpCall->getDebugLoc(), BB)
<< "Transformed bcmp idiom into a call to "
<< ore::NV("NewFunction", CmpCall->getCalledFunction())
<< "() function";
});
++NumBCmp;
}
/// Recognizes a bcmp idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the bcmp (or memcmp)
/// library function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizeBCmp() {
if (!HasMemCmp && !HasBCmp)
return false;
ICmpInst *BCmpInst;
CmpInst *LatchCmpInst;
LoadInst *LoadA, *LoadB;
const SCEV *SrcA, *SrcB, *NBytes;
if (!detectBCmpIdiom(BCmpInst, LatchCmpInst, LoadA, LoadB, SrcA, SrcB,
NBytes)) {
LLVM_DEBUG(dbgs() << "bcmp idiom recognition failed.\n");
return false;
}
transformLoopToBCmp(BCmpInst, LatchCmpInst, LoadA, LoadB, SrcA, SrcB, NBytes);
return true;
}

File diff suppressed because it is too large.


@@ -1,193 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -debugify -loop-idiom -pass-remarks=loop-idiom -pass-remarks-analysis=loop-idiom -verify -verify-each -verify-dom-info -verify-loop-info < %s -S 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; Check that everything still works when debuginfo is present, and that it is reasonably propagated.
; #include <algorithm>
;
; bool index_iteration_eq_variable_size_no_overlap(char const* ptr, size_t count) {
; char const* ptr0 = ptr;
; char const* ptr1 = ptr + count;
; for(size_t i = 0; i < count; i++) {
; if(ptr0[i] != ptr1[i])
; return false;
; }
; return true;
; }
;
; void sink(bool);
; void loop_within_loop(size_t outer_count, char const** ptr0, char const** ptr1, size_t* count) {
; for(size_t i = 0; i != outer_count; ++i)
; sink(std::equal(ptr0[i], ptr0[i] + count[i], ptr1[i]));
; }
; CHECK: remark: <stdin>:13:1: Loop recognized as a bcmp idiom
; CHECK: remark: <stdin>:11:1: Transformed bcmp idiom into a call to memcmp() function
; CHECK: remark: <stdin>:29:1: Loop recognized as a bcmp idiom
; CHECK: remark: <stdin>:34:1: Transformed bcmp idiom into a call to memcmp() function
define i1 @_Z43index_iteration_eq_variable_size_no_overlapPKcm(i8* nocapture %ptr, i64 %count) {
; CHECK-LABEL: @_Z43index_iteration_eq_variable_size_no_overlapPKcm(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[COUNT_BYTECOUNT:%.*]], !dbg !22
; CHECK-NEXT: call void @llvm.dbg.value(metadata i8* [[ADD_PTR]], metadata !9, metadata !DIExpression()), !dbg !22
; CHECK-NEXT: [[CMP14:%.*]] = icmp eq i64 [[COUNT_BYTECOUNT]], 0, !dbg !23
; CHECK-NEXT: call void @llvm.dbg.value(metadata i1 [[CMP14]], metadata !11, metadata !DIExpression()), !dbg !23
; CHECK-NEXT: br i1 [[CMP14]], label [[CLEANUP:%.*]], label [[FOR_BODY_BCMPDISPATCHBB:%.*]], !dbg !24
; CHECK: for.body.bcmpdispatchbb:
; CHECK-NEXT: [[MEMCMP:%.*]] = call i32 @memcmp(i8* [[PTR]], i8* [[ADD_PTR]], i64 [[COUNT_BYTECOUNT]]), !dbg !25
; CHECK-NEXT: [[PTR_VS_ADD_PTR_EQCMP:%.*]] = icmp eq i32 [[MEMCMP]], 0, !dbg !25
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !14, metadata !DIExpression()), !dbg !26
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !15, metadata !DIExpression()), !dbg !27
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !16, metadata !DIExpression()), !dbg !28
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !17, metadata !DIExpression()), !dbg !29
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !18, metadata !DIExpression()), !dbg !30
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !19, metadata !DIExpression()), !dbg !25
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !20, metadata !DIExpression()), !dbg !31
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !13, metadata !DIExpression()), !dbg !32
; CHECK-NEXT: br i1 [[PTR_VS_ADD_PTR_EQCMP]], label [[PTR_VS_ADD_PTR_EQCMP_EQUALBB:%.*]], label [[PTR_VS_ADD_PTR_EQCMP_UNEQUALBB:%.*]], !dbg !25
; CHECK: ptr.vs.add.ptr.eqcmp.equalbb:
; CHECK-NEXT: br label [[CLEANUP_LOOPEXIT:%.*]], !dbg !33
; CHECK: ptr.vs.add.ptr.eqcmp.unequalbb:
; CHECK-NEXT: br label [[CLEANUP_LOOPEXIT]], !dbg !34
; CHECK: cleanup.loopexit:
; CHECK-NEXT: [[RES_PH:%.*]] = phi i1 [ false, [[PTR_VS_ADD_PTR_EQCMP_UNEQUALBB]] ], [ true, [[PTR_VS_ADD_PTR_EQCMP_EQUALBB]] ]
; CHECK-NEXT: br label [[CLEANUP]], !dbg !35
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[RES_PH]], [[CLEANUP_LOOPEXIT]] ], !dbg !36
; CHECK-NEXT: call void @llvm.dbg.value(metadata i1 [[RES]], metadata !21, metadata !DIExpression()), !dbg !36
; CHECK-NEXT: ret i1 [[RES]], !dbg !35
;
entry:
%add.ptr = getelementptr inbounds i8, i8* %ptr, i64 %count
%cmp14 = icmp eq i64 %count, 0
br i1 %cmp14, label %cleanup, label %for.body
for.cond: ; preds = %for.body
%cmp = icmp ult i64 %inc, %count
br i1 %cmp, label %for.body, label %cleanup
for.body: ; preds = %entry, %for.cond
%i.015 = phi i64 [ %inc, %for.cond ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i8, i8* %ptr, i64 %i.015
%v0 = load i8, i8* %arrayidx
%arrayidx1 = getelementptr inbounds i8, i8* %add.ptr, i64 %i.015
%v1 = load i8, i8* %arrayidx1
%cmp3 = icmp eq i8 %v0, %v1
%inc = add nuw i64 %i.015, 1
br i1 %cmp3, label %for.cond, label %cleanup
cleanup: ; preds = %for.body, %for.cond, %entry
%res = phi i1 [ true, %entry ], [ true, %for.cond ], [ false, %for.body ]
ret i1 %res
}
define void @_Z16loop_within_loopmPPKcS1_Pm(i64 %outer_count, i8** %ptr0, i8** %ptr1, i64* %count) {
; CHECK-LABEL: @_Z16loop_within_loopmPPKcS1_Pm(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP11:%.*]] = icmp eq i64 [[OUTER_COUNT:%.*]], 0, !dbg !60
; CHECK-NEXT: call void @llvm.dbg.value(metadata i1 [[CMP11]], metadata !39, metadata !DIExpression()), !dbg !60
; CHECK-NEXT: br i1 [[CMP11]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]], !dbg !61
; CHECK: for.body.preheader:
; CHECK-NEXT: br label [[FOR_BODY:%.*]], !dbg !62
; CHECK: for.cond.cleanup.loopexit:
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]], !dbg !63
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void, !dbg !63
; CHECK: for.body:
; CHECK-NEXT: [[I_012:%.*]] = phi i64 [ [[INC:%.*]], [[_ZNST3__15EQUALIPKCS2_EEBT_S3_T0__EXIT:%.*]] ], [ 0, [[FOR_BODY_PREHEADER]] ], !dbg !64
; CHECK-NEXT: call void @llvm.dbg.value(metadata i64 [[I_012]], metadata !40, metadata !DIExpression()), !dbg !64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[PTR0:%.*]], i64 [[I_012]], !dbg !65
; CHECK-NEXT: call void @llvm.dbg.value(metadata i8** [[ARRAYIDX]], metadata !41, metadata !DIExpression()), !dbg !65
; CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[ARRAYIDX]], !dbg !66
; CHECK-NEXT: call void @llvm.dbg.value(metadata i8* [[T0]], metadata !42, metadata !DIExpression()), !dbg !66
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, i64* [[COUNT:%.*]], i64 [[I_012]], !dbg !67
; CHECK-NEXT: call void @llvm.dbg.value(metadata i64* [[ARRAYIDX2]], metadata !43, metadata !DIExpression()), !dbg !67
; CHECK-NEXT: [[T1_BYTECOUNT:%.*]] = load i64, i64* [[ARRAYIDX2]], !dbg !68
; CHECK-NEXT: call void @llvm.dbg.value(metadata i64 [[T1_BYTECOUNT]], metadata !44, metadata !DIExpression()), !dbg !68
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[T1_BYTECOUNT]], !dbg !69
; CHECK-NEXT: call void @llvm.dbg.value(metadata i8* [[ADD_PTR]], metadata !45, metadata !DIExpression()), !dbg !69
; CHECK-NEXT: [[CMP5_I_I:%.*]] = icmp eq i64 [[T1_BYTECOUNT]], 0, !dbg !70
; CHECK-NEXT: call void @llvm.dbg.value(metadata i1 [[CMP5_I_I]], metadata !46, metadata !DIExpression()), !dbg !70
; CHECK-NEXT: br i1 [[CMP5_I_I]], label [[_ZNST3__15EQUALIPKCS2_EEBT_S3_T0__EXIT]], label [[FOR_BODY_I_I_PREHEADER:%.*]], !dbg !62
; CHECK: for.body.i.i.preheader:
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8*, i8** [[PTR1:%.*]], i64 [[I_012]], !dbg !71
; CHECK-NEXT: call void @llvm.dbg.value(metadata i8** [[ARRAYIDX3]], metadata !47, metadata !DIExpression()), !dbg !71
; CHECK-NEXT: [[T2:%.*]] = load i8*, i8** [[ARRAYIDX3]], !dbg !72
; CHECK-NEXT: call void @llvm.dbg.value(metadata i8* [[T2]], metadata !48, metadata !DIExpression()), !dbg !72
; CHECK-NEXT: [[MEMCMP:%.*]] = call i32 @memcmp(i8* [[T0]], i8* [[T2]], i64 [[T1_BYTECOUNT]]), !dbg !73
; CHECK-NEXT: [[T0_VS_T2_EQCMP:%.*]] = icmp eq i32 [[MEMCMP]], 0, !dbg !73
; CHECK-NEXT: br label [[FOR_BODY_I_I_BCMPDISPATCHBB:%.*]]
; CHECK: for.body.i.i.bcmpdispatchbb:
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !49, metadata !DIExpression()), !dbg !74
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !50, metadata !DIExpression()), !dbg !75
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !51, metadata !DIExpression()), !dbg !76
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !52, metadata !DIExpression()), !dbg !77
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !53, metadata !DIExpression()), !dbg !73
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !54, metadata !DIExpression()), !dbg !78
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !55, metadata !DIExpression()), !dbg !79
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 undef, metadata !56, metadata !DIExpression()), !dbg !80
; CHECK-NEXT: br i1 [[T0_VS_T2_EQCMP]], label [[T0_VS_T2_EQCMP_EQUALBB:%.*]], label [[T0_VS_T2_EQCMP_UNEQUALBB:%.*]], !dbg !73
; CHECK: t0.vs.t2.eqcmp.equalbb:
; CHECK-NEXT: br i1 true, label [[_ZNST3__15EQUALIPKCS2_EEBT_S3_T0__EXIT_LOOPEXIT:%.*]], label [[FOR_BODY_I_I_BCMPDISPATCHBB]], !dbg !81
; CHECK: t0.vs.t2.eqcmp.unequalbb:
; CHECK-NEXT: br i1 true, label [[_ZNST3__15EQUALIPKCS2_EEBT_S3_T0__EXIT_LOOPEXIT]], label [[FOR_BODY_I_I_BCMPDISPATCHBB]], !dbg !82
; CHECK: _ZNSt3__15equalIPKcS2_EEbT_S3_T0_.exit.loopexit:
; CHECK-NEXT: [[RETVAL_0_I_I_PH:%.*]] = phi i1 [ false, [[T0_VS_T2_EQCMP_UNEQUALBB]] ], [ true, [[T0_VS_T2_EQCMP_EQUALBB]] ]
; CHECK-NEXT: br label [[_ZNST3__15EQUALIPKCS2_EEBT_S3_T0__EXIT]], !dbg !83
; CHECK: _ZNSt3__15equalIPKcS2_EEbT_S3_T0_.exit:
; CHECK-NEXT: [[RETVAL_0_I_I:%.*]] = phi i1 [ true, [[FOR_BODY]] ], [ [[RETVAL_0_I_I_PH]], [[_ZNST3__15EQUALIPKCS2_EEBT_S3_T0__EXIT_LOOPEXIT]] ], !dbg !84
; CHECK-NEXT: call void @llvm.dbg.value(metadata i1 [[RETVAL_0_I_I]], metadata !57, metadata !DIExpression()), !dbg !84
; CHECK-NEXT: tail call void @_Z4sinkb(i1 [[RETVAL_0_I_I]]), !dbg !83
; CHECK-NEXT: [[INC]] = add nuw i64 [[I_012]], 1, !dbg !85
; CHECK-NEXT: call void @llvm.dbg.value(metadata i64 [[INC]], metadata !58, metadata !DIExpression()), !dbg !85
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[INC]], [[OUTER_COUNT]], !dbg !86
; CHECK-NEXT: call void @llvm.dbg.value(metadata i1 [[CMP]], metadata !59, metadata !DIExpression()), !dbg !86
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]], !dbg !87
;
entry:
%cmp11 = icmp eq i64 %outer_count, 0
br i1 %cmp11, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %_ZNSt3__15equalIPKcS2_EEbT_S3_T0_.exit, %entry
ret void
for.body: ; preds = %entry, %_ZNSt3__15equalIPKcS2_EEbT_S3_T0_.exit
%i.012 = phi i64 [ %inc, %_ZNSt3__15equalIPKcS2_EEbT_S3_T0_.exit ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i8*, i8** %ptr0, i64 %i.012
%t0 = load i8*, i8** %arrayidx
%arrayidx2 = getelementptr inbounds i64, i64* %count, i64 %i.012
%t1 = load i64, i64* %arrayidx2
%add.ptr = getelementptr inbounds i8, i8* %t0, i64 %t1
%cmp5.i.i = icmp eq i64 %t1, 0
br i1 %cmp5.i.i, label %_ZNSt3__15equalIPKcS2_EEbT_S3_T0_.exit, label %for.body.i.i.preheader
for.body.i.i.preheader: ; preds = %for.body
%arrayidx3 = getelementptr inbounds i8*, i8** %ptr1, i64 %i.012
%t2 = load i8*, i8** %arrayidx3
br label %for.body.i.i
for.body.i.i: ; preds = %for.body.i.i.preheader, %for.inc.i.i
%__first2.addr.07.i.i = phi i8* [ %incdec.ptr1.i.i, %for.inc.i.i ], [ %t2, %for.body.i.i.preheader ]
%__first1.addr.06.i.i = phi i8* [ %incdec.ptr.i.i, %for.inc.i.i ], [ %t0, %for.body.i.i.preheader ]
%t3 = load i8, i8* %__first1.addr.06.i.i
%t4 = load i8, i8* %__first2.addr.07.i.i
%cmp.i.i.i = icmp eq i8 %t3, %t4
br i1 %cmp.i.i.i, label %for.inc.i.i, label %_ZNSt3__15equalIPKcS2_EEbT_S3_T0_.exit
for.inc.i.i: ; preds = %for.body.i.i
%incdec.ptr.i.i = getelementptr inbounds i8, i8* %__first1.addr.06.i.i, i64 1
%incdec.ptr1.i.i = getelementptr inbounds i8, i8* %__first2.addr.07.i.i, i64 1
%cmp.i.i = icmp eq i8* %incdec.ptr.i.i, %add.ptr
br i1 %cmp.i.i, label %_ZNSt3__15equalIPKcS2_EEbT_S3_T0_.exit, label %for.body.i.i
_ZNSt3__15equalIPKcS2_EEbT_S3_T0_.exit: ; preds = %for.body.i.i, %for.inc.i.i, %for.body
%retval.0.i.i = phi i1 [ true, %for.body ], [ true, %for.inc.i.i ], [ false, %for.body.i.i ]
tail call void @_Z4sinkb(i1 %retval.0.i.i)
%inc = add nuw i64 %i.012, 1
%cmp = icmp eq i64 %inc, %outer_count
br i1 %cmp, label %for.cond.cleanup, label %for.body
}
declare void @_Z4sinkb(i1)


@@ -1,999 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-idiom -verify -verify-each -verify-dom-info -verify-loop-info < %s -S | FileCheck %s --implicit-check-not=bcmp --implicit-check-not=memcmp
; CHECK: source_filename
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define i1 @three_blocks_and_two_latches_in_loop(i8* %ptr0, i8* %ptr1) {
; CHECK-LABEL: @three_blocks_and_two_latches_in_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[I_08_BE:%.*]], [[FOR_BODY_BACKEDGE:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[ARRAYIDX]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[PTR1:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[ARRAYIDX1]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: [[INC:%.*]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_PASSTHROUGH:%.*]], label [[CLEANUP:%.*]]
; CHECK: for.passthrough:
; CHECK-NEXT: br i1 true, label [[FOR_COND:%.*]], label [[FOR_BODY_BACKEDGE]]
; CHECK: for.body.backedge:
; CHECK-NEXT: [[I_08_BE]] = phi i64 [ [[INC]], [[FOR_COND]] ], [ 0, [[FOR_PASSTHROUGH]] ]
; CHECK-NEXT: br label [[FOR_BODY]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY_BACKEDGE]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ], [ 0, %for.passthrough ]
%arrayidx = getelementptr inbounds i8, i8* %ptr0, i64 %i.08
%v0 = load i8, i8* %arrayidx
%arrayidx1 = getelementptr inbounds i8, i8* %ptr1, i64 %i.08
%v1 = load i8, i8* %arrayidx1
%cmp3 = icmp eq i8 %v0, %v1
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.passthrough, label %cleanup
for.passthrough:
br i1 true, label %for.cond, label %for.body
for.cond:
%cmp = icmp ult i64 %inc, 8
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.cond ]
ret i1 %res
}
define i1 @three_blocks_in_loop(i8* %ptr0, i8* %ptr1) {
; CHECK-LABEL: @three_blocks_in_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[ARRAYIDX]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[PTR1:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[ARRAYIDX1]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_PASSTHROUGH:%.*]], label [[CLEANUP:%.*]]
; CHECK: for.passthrough:
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds i8, i8* %ptr0, i64 %i.08
%v0 = load i8, i8* %arrayidx
%arrayidx1 = getelementptr inbounds i8, i8* %ptr1, i64 %i.08
%v1 = load i8, i8* %arrayidx1
%cmp3 = icmp eq i8 %v0, %v1
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.passthrough, label %cleanup
for.passthrough:
br label %for.cond
for.cond:
%cmp = icmp ult i64 %inc, 8
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.cond ]
ret i1 %res
}
define i1 @body_cmp_is_not_equality(i8* %ptr0, i8* %ptr1) {
; CHECK-LABEL: @body_cmp_is_not_equality(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[ARRAYIDX]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[PTR1:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[ARRAYIDX1]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i8 [[V0]], [[V1]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_COND]], label [[CLEANUP:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds i8, i8* %ptr0, i64 %i.08
%v0 = load i8, i8* %arrayidx
%arrayidx1 = getelementptr inbounds i8, i8* %ptr1, i64 %i.08
%v1 = load i8, i8* %arrayidx1
%cmp3 = icmp ult i8 %v0, %v1
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.cond, label %cleanup
for.cond:
%cmp = icmp ult i64 %inc, 8
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.cond ]
ret i1 %res
}
define i1 @only_one_load(i8* %ptr0, i8* %ptr1) {
; CHECK-LABEL: @only_one_load(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[ARRAYIDX]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[V0]], 0
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_COND]], label [[CLEANUP:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds i8, i8* %ptr0, i64 %i.08
%v0 = load i8, i8* %arrayidx
%cmp3 = icmp eq i8 %v0, 0
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.cond, label %cleanup
for.cond:
%cmp = icmp ult i64 %inc, 8
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.cond ]
ret i1 %res
}
define i1 @loads_of_less_than_byte(i7* %ptr0, i7* %ptr1) {
; CHECK-LABEL: @loads_of_less_than_byte(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i7, i7* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i7, i7* [[ARRAYIDX]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i7, i7* [[PTR1:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V1:%.*]] = load i7, i7* [[ARRAYIDX1]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i7 [[V0]], [[V1]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_COND]], label [[CLEANUP:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds i7, i7* %ptr0, i64 %i.08
%v0 = load i7, i7* %arrayidx
%arrayidx1 = getelementptr inbounds i7, i7* %ptr1, i64 %i.08
%v1 = load i7, i7* %arrayidx1
%cmp3 = icmp ult i7 %v0, %v1
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.cond, label %cleanup
for.cond:
%cmp = icmp ult i64 %inc, 8
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.cond ]
ret i1 %res
}
define i1 @loads_of_not_multiple_of_a_byte(i9* %ptr0, i9* %ptr1) {
; CHECK-LABEL: @loads_of_not_multiple_of_a_byte(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i9, i9* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i9, i9* [[ARRAYIDX]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i9, i9* [[PTR1:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V1:%.*]] = load i9, i9* [[ARRAYIDX1]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i9 [[V0]], [[V1]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_COND]], label [[CLEANUP:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds i9, i9* %ptr0, i64 %i.08
%v0 = load i9, i9* %arrayidx
%arrayidx1 = getelementptr inbounds i9, i9* %ptr1, i64 %i.08
%v1 = load i9, i9* %arrayidx1
%cmp3 = icmp ult i9 %v0, %v1
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.cond, label %cleanup
for.cond:
%cmp = icmp ult i64 %inc, 8
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.cond ]
ret i1 %res
}
define i1 @loop_instruction_used_in_phi_node_outside_loop(i8* %ptr0, i8* %ptr1) {
; CHECK-LABEL: @loop_instruction_used_in_phi_node_outside_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[ARRAYIDX]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[PTR1:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[ARRAYIDX1]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_COND]], label [[CLEANUP:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ [[CMP3]], [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds i8, i8* %ptr0, i64 %i.08
%v0 = load i8, i8* %arrayidx
%arrayidx1 = getelementptr inbounds i8, i8* %ptr1, i64 %i.08
%v1 = load i8, i8* %arrayidx1
%cmp3 = icmp eq i8 %v0, %v1
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.cond, label %cleanup
for.cond:
%cmp = icmp ult i64 %inc, 8
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ %cmp3, %for.body ], [ true, %for.cond ]
ret i1 %res
}
define i1 @loop_has_write(i8* %ptr0, i8* %ptr1, i32* %write) {
; CHECK-LABEL: @loop_has_write(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[ARRAYIDX]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[PTR1:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[ARRAYIDX1]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_COND]], label [[CLEANUP:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: store i32 0, i32* [[WRITE:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds i8, i8* %ptr0, i64 %i.08
%v0 = load i8, i8* %arrayidx
%arrayidx1 = getelementptr inbounds i8, i8* %ptr1, i64 %i.08
%v1 = load i8, i8* %arrayidx1
%cmp3 = icmp eq i8 %v0, %v1
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.cond, label %cleanup
for.cond:
%cmp = icmp ult i64 %inc, 8
store i32 0, i32* %write
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.cond ]
ret i1 %res
}
declare void @sink()
define i1 @loop_has_call(i8* %ptr0, i8* %ptr1, i32* %load) {
; CHECK-LABEL: @loop_has_call(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[ARRAYIDX]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[PTR1:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[ARRAYIDX1]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_COND]], label [[CLEANUP:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: tail call void @sink()
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds i8, i8* %ptr0, i64 %i.08
%v0 = load i8, i8* %arrayidx
%arrayidx1 = getelementptr inbounds i8, i8* %ptr1, i64 %i.08
%v1 = load i8, i8* %arrayidx1
%cmp3 = icmp eq i8 %v0, %v1
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.cond, label %cleanup
for.cond:
%cmp = icmp ult i64 %inc, 8
tail call void @sink()
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.cond ]
ret i1 %res
}
define i1 @loop_has_atomic_load(i8* %ptr0, i8* %ptr1, i32* %load) {
; CHECK-LABEL: @loop_has_atomic_load(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_COND:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[PTR0:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[ARRAYIDX]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[PTR1:%.*]], i64 [[I_08]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[ARRAYIDX1]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_COND]], label [[CLEANUP:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 8
; CHECK-NEXT: [[TMP:%.*]] = load atomic i32, i32* [[LOAD:%.*]] unordered, align 1
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_COND]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
br label %for.body
for.body:
%i.08 = phi i64 [ 0, %entry ], [ %inc, %for.cond ]
%arrayidx = getelementptr inbounds i8, i8* %ptr0, i64 %i.08
%v0 = load i8, i8* %arrayidx
%arrayidx1 = getelementptr inbounds i8, i8* %ptr1, i64 %i.08
%v1 = load i8, i8* %arrayidx1
%cmp3 = icmp eq i8 %v0, %v1
%inc = add nuw nsw i64 %i.08, 1
br i1 %cmp3, label %for.cond, label %cleanup
for.cond:
%cmp = icmp ult i64 %inc, 8
%tmp = load atomic i32, i32* %load unordered, align 1
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.cond ]
ret i1 %res
}
define i1 @different_load_step(i8* %ptr) {
; CHECK-LABEL: @different_load_step(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_015:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[PTR1_014:%.*]] = phi i8* [ [[ADD_PTR]], [[ENTRY]] ], [ [[ADD_PTR3:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[PTR0_013:%.*]] = phi i8* [ [[PTR]], [[ENTRY]] ], [ [[INCDEC_PTR:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[PTR0_013]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[PTR1_014]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_INC]], label [[CLEANUP:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_015]], 1
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[PTR0_013]], i64 1
; CHECK-NEXT: [[ADD_PTR3]] = getelementptr inbounds i8, i8* [[PTR1_014]], i64 2
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 16
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
br label %for.body
for.body:
%i.015 = phi i64 [ 0, %entry ], [ %inc, %for.inc ]
%ptr1.014 = phi i8* [ %add.ptr, %entry ], [ %add.ptr3, %for.inc ]
%ptr0.013 = phi i8* [ %ptr, %entry ], [ %incdec.ptr, %for.inc ]
%v0 = load i8, i8* %ptr0.013
%v1 = load i8, i8* %ptr1.014
%cmp2 = icmp eq i8 %v0, %v1
br i1 %cmp2, label %for.inc, label %cleanup
for.inc:
%inc = add nuw nsw i64 %i.015, 1
%incdec.ptr = getelementptr inbounds i8, i8* %ptr0.013, i64 1
%add.ptr3 = getelementptr inbounds i8, i8* %ptr1.014, i64 2
%cmp = icmp ult i64 %inc, 16
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.inc ]
ret i1 %res
}
define i1 @step_is_variable(i8* %ptr, i64 %step) {
; CHECK-LABEL: @step_is_variable(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_015:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[PTR1_014:%.*]] = phi i8* [ [[ADD_PTR]], [[ENTRY]] ], [ [[ADD_PTR3:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[PTR0_013:%.*]] = phi i8* [ [[PTR]], [[ENTRY]] ], [ [[INCDEC_PTR:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[PTR0_013]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[PTR1_014]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_INC]], label [[CLEANUP:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_015]], [[STEP:%.*]]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[PTR0_013]], i64 1
; CHECK-NEXT: [[ADD_PTR3]] = getelementptr inbounds i8, i8* [[PTR1_014]], i64 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 16
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
br label %for.body
for.body:
%i.015 = phi i64 [ 0, %entry ], [ %inc, %for.inc ]
%ptr1.014 = phi i8* [ %add.ptr, %entry ], [ %add.ptr3, %for.inc ]
%ptr0.013 = phi i8* [ %ptr, %entry ], [ %incdec.ptr, %for.inc ]
%v0 = load i8, i8* %ptr0.013
%v1 = load i8, i8* %ptr1.014
%cmp2 = icmp eq i8 %v0, %v1
br i1 %cmp2, label %for.inc, label %cleanup
for.inc:
%inc = add nuw nsw i64 %i.015, %step
%incdec.ptr = getelementptr inbounds i8, i8* %ptr0.013, i64 1
%add.ptr3 = getelementptr inbounds i8, i8* %ptr1.014, i64 1
%cmp = icmp ult i64 %inc, 16
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.inc ]
ret i1 %res
}
define i1 @load_step_is_variable(i8* %ptr, i64 %step) {
; CHECK-LABEL: @load_step_is_variable(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_015:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[PTR1_014:%.*]] = phi i8* [ [[ADD_PTR]], [[ENTRY]] ], [ [[ADD_PTR3:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[PTR0_013:%.*]] = phi i8* [ [[PTR]], [[ENTRY]] ], [ [[INCDEC_PTR:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[PTR0_013]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[PTR1_014]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_INC]], label [[CLEANUP:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_015]], 1
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[PTR0_013]], i64 [[STEP:%.*]]
; CHECK-NEXT: [[ADD_PTR3]] = getelementptr inbounds i8, i8* [[PTR1_014]], i64 [[STEP]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 16
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
br label %for.body
for.body:
%i.015 = phi i64 [ 0, %entry ], [ %inc, %for.inc ]
%ptr1.014 = phi i8* [ %add.ptr, %entry ], [ %add.ptr3, %for.inc ]
%ptr0.013 = phi i8* [ %ptr, %entry ], [ %incdec.ptr, %for.inc ]
%v0 = load i8, i8* %ptr0.013
%v1 = load i8, i8* %ptr1.014
%cmp2 = icmp eq i8 %v0, %v1
br i1 %cmp2, label %for.inc, label %cleanup
for.inc:
%inc = add nuw nsw i64 %i.015, 1
%incdec.ptr = getelementptr inbounds i8, i8* %ptr0.013, i64 %step
%add.ptr3 = getelementptr inbounds i8, i8* %ptr1.014, i64 %step
%cmp = icmp ult i64 %inc, 16
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.inc ]
ret i1 %res
}
define i1 @step_and_load_step_is_variable(i8* %ptr, i64 %step) {
; CHECK-LABEL: @step_and_load_step_is_variable(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_015:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[PTR1_014:%.*]] = phi i8* [ [[ADD_PTR]], [[ENTRY]] ], [ [[ADD_PTR3:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[PTR0_013:%.*]] = phi i8* [ [[PTR]], [[ENTRY]] ], [ [[INCDEC_PTR:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[PTR0_013]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[PTR1_014]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_INC]], label [[CLEANUP:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_015]], [[STEP:%.*]]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[PTR0_013]], i64 [[STEP]]
; CHECK-NEXT: [[ADD_PTR3]] = getelementptr inbounds i8, i8* [[PTR1_014]], i64 [[STEP]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 16
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
br label %for.body
for.body:
%i.015 = phi i64 [ 0, %entry ], [ %inc, %for.inc ]
%ptr1.014 = phi i8* [ %add.ptr, %entry ], [ %add.ptr3, %for.inc ]
%ptr0.013 = phi i8* [ %ptr, %entry ], [ %incdec.ptr, %for.inc ]
%v0 = load i8, i8* %ptr0.013
%v1 = load i8, i8* %ptr1.014
%cmp2 = icmp eq i8 %v0, %v1
br i1 %cmp2, label %for.inc, label %cleanup
for.inc:
%inc = add nuw nsw i64 %i.015, %step
%incdec.ptr = getelementptr inbounds i8, i8* %ptr0.013, i64 %step
%add.ptr3 = getelementptr inbounds i8, i8* %ptr1.014, i64 %step
%cmp = icmp ult i64 %inc, 16
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.inc ]
ret i1 %res
}
define i1 @load_step_not_affine(i8* %ptr) {
; CHECK-LABEL: @load_step_not_affine(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_018:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[PTR1_017:%.*]] = phi i8* [ [[ADD_PTR]], [[ENTRY]] ], [ [[ADD_PTR4:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[PTR0_016:%.*]] = phi i8* [ [[PTR]], [[ENTRY]] ], [ [[ADD_PTR3:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[PTR0_016]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[PTR1_017]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_INC]], label [[CLEANUP:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_018]], 1
; CHECK-NEXT: [[ADD_PTR3]] = getelementptr inbounds i8, i8* [[PTR0_016]], i64 [[INC]]
; CHECK-NEXT: [[ADD_PTR4]] = getelementptr inbounds i8, i8* [[PTR1_017]], i64 [[INC]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 16
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
br label %for.body
for.body:
%i.018 = phi i64 [ 0, %entry ], [ %inc, %for.inc ]
%ptr1.017 = phi i8* [ %add.ptr, %entry ], [ %add.ptr4, %for.inc ]
%ptr0.016 = phi i8* [ %ptr, %entry ], [ %add.ptr3, %for.inc ]
%v0 = load i8, i8* %ptr0.016
%v1 = load i8, i8* %ptr1.017
%cmp2 = icmp eq i8 %v0, %v1
br i1 %cmp2, label %for.inc, label %cleanup
for.inc:
%inc = add nuw nsw i64 %i.018, 1
%add.ptr3 = getelementptr inbounds i8, i8* %ptr0.016, i64 %inc
%add.ptr4 = getelementptr inbounds i8, i8* %ptr1.017, i64 %inc
%cmp = icmp ult i64 %inc, 16
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.inc ]
ret i1 %res
}
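; Each iteration compares only a single byte, yet both pointers advance by 2, so
; the compared bytes sit at offsets 0, 2, 4, ... and do not form one contiguous
; region; a single bcmp()/memcmp() over the range would compare bytes this loop
; never looks at.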
define i1 @no_overlap_between_loads(i8* %ptr) {
; CHECK-LABEL: @no_overlap_between_loads(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_016:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[PTR1_015:%.*]] = phi i8* [ [[ADD_PTR]], [[ENTRY]] ], [ [[ADD_PTR4:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[PTR0_014:%.*]] = phi i8* [ [[PTR]], [[ENTRY]] ], [ [[ADD_PTR3:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8* [[PTR0_014]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8* [[PTR1_015]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_INC]], label [[CLEANUP:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_016]], 1
; CHECK-NEXT: [[ADD_PTR3]] = getelementptr inbounds i8, i8* [[PTR0_014]], i64 2
; CHECK-NEXT: [[ADD_PTR4]] = getelementptr inbounds i8, i8* [[PTR1_015]], i64 2
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 16
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
br label %for.body
for.body:
%i.016 = phi i64 [ 0, %entry ], [ %inc, %for.inc ]
%ptr1.015 = phi i8* [ %add.ptr, %entry ], [ %add.ptr4, %for.inc ]
%ptr0.014 = phi i8* [ %ptr, %entry ], [ %add.ptr3, %for.inc ]
%v0 = load i8, i8* %ptr0.014
%v1 = load i8, i8* %ptr1.015
%cmp2 = icmp eq i8 %v0, %v1
br i1 %cmp2, label %for.inc, label %cleanup
for.inc:
%inc = add nuw nsw i64 %i.016, 1
%add.ptr3 = getelementptr inbounds i8, i8* %ptr0.014, i64 2
%add.ptr4 = getelementptr inbounds i8, i8* %ptr1.015, i64 2
%cmp = icmp ult i64 %inc, 16
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.inc ]
ret i1 %res
}
define i1 @volatile_loads(i8* %ptr) {
; CHECK-LABEL: @volatile_loads(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_016:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[PTR1_015:%.*]] = phi i8* [ [[ADD_PTR]], [[ENTRY]] ], [ [[ADD_PTR4:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[PTR0_014:%.*]] = phi i8* [ [[PTR]], [[ENTRY]] ], [ [[ADD_PTR3:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[V0:%.*]] = load volatile i8, i8* [[PTR0_014]]
; CHECK-NEXT: [[V1:%.*]] = load volatile i8, i8* [[PTR1_015]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_INC]], label [[CLEANUP:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_016]], 1
; CHECK-NEXT: [[ADD_PTR3]] = getelementptr inbounds i8, i8* [[PTR0_014]], i64 1
; CHECK-NEXT: [[ADD_PTR4]] = getelementptr inbounds i8, i8* [[PTR1_015]], i64 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 16
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
br label %for.body
for.body:
%i.016 = phi i64 [ 0, %entry ], [ %inc, %for.inc ]
%ptr1.015 = phi i8* [ %add.ptr, %entry ], [ %add.ptr4, %for.inc ]
%ptr0.014 = phi i8* [ %ptr, %entry ], [ %add.ptr3, %for.inc ]
%v0 = load volatile i8, i8* %ptr0.014
%v1 = load volatile i8, i8* %ptr1.015
%cmp2 = icmp eq i8 %v0, %v1
br i1 %cmp2, label %for.inc, label %cleanup
for.inc:
%inc = add nuw nsw i64 %i.016, 1
%add.ptr3 = getelementptr inbounds i8, i8* %ptr0.014, i64 1
%add.ptr4 = getelementptr inbounds i8, i8* %ptr1.015, i64 1
%cmp = icmp ult i64 %inc, 16
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.inc ]
ret i1 %res
}
define i1 @atomic_loads(i8* %ptr) {
; CHECK-LABEL: @atomic_loads(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_016:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[PTR1_015:%.*]] = phi i8* [ [[ADD_PTR]], [[ENTRY]] ], [ [[ADD_PTR4:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[PTR0_014:%.*]] = phi i8* [ [[PTR]], [[ENTRY]] ], [ [[ADD_PTR3:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[V0:%.*]] = load atomic i8, i8* [[PTR0_014]] unordered, align 1
; CHECK-NEXT: [[V1:%.*]] = load atomic i8, i8* [[PTR1_015]] unordered, align 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_INC]], label [[CLEANUP:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_016]], 1
; CHECK-NEXT: [[ADD_PTR3]] = getelementptr inbounds i8, i8* [[PTR0_014]], i64 1
; CHECK-NEXT: [[ADD_PTR4]] = getelementptr inbounds i8, i8* [[PTR1_015]], i64 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 16
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%add.ptr = getelementptr inbounds i8, i8* %ptr, i64 8
br label %for.body
for.body:
%i.016 = phi i64 [ 0, %entry ], [ %inc, %for.inc ]
%ptr1.015 = phi i8* [ %add.ptr, %entry ], [ %add.ptr4, %for.inc ]
%ptr0.014 = phi i8* [ %ptr, %entry ], [ %add.ptr3, %for.inc ]
%v0 = load atomic i8, i8* %ptr0.014 unordered, align 1
%v1 = load atomic i8, i8* %ptr1.015 unordered, align 1
%cmp2 = icmp eq i8 %v0, %v1
br i1 %cmp2, label %for.inc, label %cleanup
for.inc:
%inc = add nuw nsw i64 %i.016, 1
%add.ptr3 = getelementptr inbounds i8, i8* %ptr0.014, i64 1
%add.ptr4 = getelementptr inbounds i8, i8* %ptr1.015, i64 1
%cmp = icmp ult i64 %inc, 16
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.inc ]
ret i1 %res
}
define i1 @address_space(i8 addrspace(1)* %ptr) {
; CHECK-LABEL: @address_space(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[PTR:%.*]], i64 8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_016:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[PTR1_015:%.*]] = phi i8 addrspace(1)* [ [[ADD_PTR]], [[ENTRY]] ], [ [[ADD_PTR4:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[PTR0_014:%.*]] = phi i8 addrspace(1)* [ [[PTR]], [[ENTRY]] ], [ [[ADD_PTR3:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[V0:%.*]] = load i8, i8 addrspace(1)* [[PTR0_014]]
; CHECK-NEXT: [[V1:%.*]] = load i8, i8 addrspace(1)* [[PTR1_015]]
; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[V0]], [[V1]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_INC]], label [[CLEANUP:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_016]], 1
; CHECK-NEXT: [[ADD_PTR3]] = getelementptr inbounds i8, i8 addrspace(1)* [[PTR0_014]], i64 1
; CHECK-NEXT: [[ADD_PTR4]] = getelementptr inbounds i8, i8 addrspace(1)* [[PTR1_015]], i64 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 16
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%add.ptr = getelementptr inbounds i8, i8 addrspace(1)* %ptr, i64 8
br label %for.body
for.body:
%i.016 = phi i64 [ 0, %entry ], [ %inc, %for.inc ]
%ptr1.015 = phi i8 addrspace(1)* [ %add.ptr, %entry ], [ %add.ptr4, %for.inc ]
%ptr0.014 = phi i8 addrspace(1)* [ %ptr, %entry ], [ %add.ptr3, %for.inc ]
%v0 = load i8, i8 addrspace(1)* %ptr0.014
%v1 = load i8, i8 addrspace(1)* %ptr1.015
%cmp2 = icmp eq i8 %v0, %v1
br i1 %cmp2, label %for.inc, label %cleanup
for.inc:
%inc = add nuw nsw i64 %i.016, 1
%add.ptr3 = getelementptr inbounds i8, i8 addrspace(1)* %ptr0.014, i64 1
%add.ptr4 = getelementptr inbounds i8, i8 addrspace(1)* %ptr1.015, i64 1
%cmp = icmp ult i64 %inc, 16
br i1 %cmp, label %for.body, label %cleanup
cleanup:
%res = phi i1 [ false, %for.body ], [ true, %for.inc ]
ret i1 %res
}
; See https://bugs.llvm.org/show_bug.cgi?id=43206 for the original reduced (but runnable) test:
;
; bool do_check(int i_max, int j_max, int**bptr, int* fillch) {
; for (int i = 0; i < i_max; i++)
; for (int j = 0; j < j_max; j++)
; if (bptr[i][j] != fillch[i])
; return 1;
; return 0;
; }
; The loads proceed differently here: fillch[i] is loaded once per outer-loop
; iteration, while bptr[i][j] changes in both the inner and the outer loop.
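;
; Since fillch[i] is loop-invariant in the inner loop, that loop compares one
; advancing buffer against a single scalar, not two advancing buffers, so it
; must not be rewritten into a two-pointer bcmp()/memcmp() call. Roughly, the
; two shapes are (hypothetical C sketch, names are illustrative only):
;
;   // inner loop of do_check(): one pointer advances, 'fill' stays fixed
;   for (int j = 0; j < j_max; j++)
;     if (row[j] != fill) return true;
;
;   // shape the bcmp idiom looks for: both pointers advance in lockstep
;   for (size_t k = 0; k < n; k++)
;     if (a[k] != b[k]) return true;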
define i1 @pr43206_different_loops(i32 %i_max, i32 %j_max, i32** %bptr, i32* %fillch) {
; CHECK-LABEL: @pr43206_different_loops(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP31:%.*]] = icmp sgt i32 [[I_MAX:%.*]], 0
; CHECK-NEXT: [[CMP229:%.*]] = icmp sgt i32 [[J_MAX:%.*]], 0
; CHECK-NEXT: [[OR_COND:%.*]] = and i1 [[CMP31]], [[CMP229]]
; CHECK-NEXT: br i1 [[OR_COND]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]], label [[CLEANUP12:%.*]]
; CHECK: for.cond1.preheader.us.preheader:
; CHECK-NEXT: [[WIDE_TRIP_COUNT38:%.*]] = zext i32 [[I_MAX]] to i64
; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[J_MAX]] to i64
; CHECK-NEXT: br label [[FOR_COND1_PREHEADER_US:%.*]]
; CHECK: for.cond1.preheader.us:
; CHECK-NEXT: [[INDVARS_IV36:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ], [ [[INDVARS_IV_NEXT37:%.*]], [[FOR_COND1_FOR_INC10_CRIT_EDGE_US:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32*, i32** [[BPTR:%.*]], i64 [[INDVARS_IV36]]
; CHECK-NEXT: [[V0:%.*]] = load i32*, i32** [[ARRAYIDX_US]], align 8
; CHECK-NEXT: [[ARRAYIDX8_US:%.*]] = getelementptr inbounds i32, i32* [[FILLCH:%.*]], i64 [[INDVARS_IV36]]
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[ARRAYIDX8_US]], align 4
; CHECK-NEXT: br label [[FOR_BODY4_US:%.*]]
; CHECK: for.cond1.us:
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT:%.*]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND1_FOR_INC10_CRIT_EDGE_US]], label [[FOR_BODY4_US]]
; CHECK: for.body4.us:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INDVARS_IV_NEXT]], [[FOR_COND1_US:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX6_US:%.*]] = getelementptr inbounds i32, i32* [[V0]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[ARRAYIDX6_US]], align 4
; CHECK-NEXT: [[CMP9_US:%.*]] = icmp eq i32 [[V2]], [[V1]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: br i1 [[CMP9_US]], label [[FOR_COND1_US]], label [[CLEANUP12_LOOPEXIT:%.*]]
; CHECK: for.cond1.for.inc10_crit_edge.us:
; CHECK-NEXT: [[INDVARS_IV_NEXT37]] = add nuw nsw i64 [[INDVARS_IV36]], 1
; CHECK-NEXT: [[EXITCOND39:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT37]], [[WIDE_TRIP_COUNT38]]
; CHECK-NEXT: br i1 [[EXITCOND39]], label [[CLEANUP12_LOOPEXIT1:%.*]], label [[FOR_COND1_PREHEADER_US]]
; CHECK: cleanup12.loopexit:
; CHECK-NEXT: br label [[CLEANUP12]]
; CHECK: cleanup12.loopexit1:
; CHECK-NEXT: br label [[CLEANUP12]]
; CHECK: cleanup12:
; CHECK-NEXT: [[V3:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ true, [[CLEANUP12_LOOPEXIT]] ], [ false, [[CLEANUP12_LOOPEXIT1]] ]
; CHECK-NEXT: ret i1 [[V3]]
;
entry:
%cmp31 = icmp sgt i32 %i_max, 0
%cmp229 = icmp sgt i32 %j_max, 0
%or.cond = and i1 %cmp31, %cmp229
br i1 %or.cond, label %for.cond1.preheader.us.preheader, label %cleanup12
for.cond1.preheader.us.preheader: ; preds = %entry
%wide.trip.count38 = zext i32 %i_max to i64
%wide.trip.count = zext i32 %j_max to i64
br label %for.cond1.preheader.us
for.cond1.preheader.us: ; preds = %for.cond1.for.inc10_crit_edge.us, %for.cond1.preheader.us.preheader
%indvars.iv36 = phi i64 [ 0, %for.cond1.preheader.us.preheader ], [ %indvars.iv.next37, %for.cond1.for.inc10_crit_edge.us ]
%arrayidx.us = getelementptr inbounds i32*, i32** %bptr, i64 %indvars.iv36
%v0 = load i32*, i32** %arrayidx.us, align 8
%arrayidx8.us = getelementptr inbounds i32, i32* %fillch, i64 %indvars.iv36
%v1 = load i32, i32* %arrayidx8.us, align 4
br label %for.body4.us
for.cond1.us: ; preds = %for.body4.us
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.cond1.for.inc10_crit_edge.us, label %for.body4.us
for.body4.us: ; preds = %for.cond1.us, %for.cond1.preheader.us
%indvars.iv = phi i64 [ 0, %for.cond1.preheader.us ], [ %indvars.iv.next, %for.cond1.us ]
%arrayidx6.us = getelementptr inbounds i32, i32* %v0, i64 %indvars.iv
%v2 = load i32, i32* %arrayidx6.us, align 4
%cmp9.us = icmp eq i32 %v2, %v1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
br i1 %cmp9.us, label %for.cond1.us, label %cleanup12
for.cond1.for.inc10_crit_edge.us: ; preds = %for.cond1.us
%indvars.iv.next37 = add nuw nsw i64 %indvars.iv36, 1
%exitcond39 = icmp eq i64 %indvars.iv.next37, %wide.trip.count38
br i1 %exitcond39, label %cleanup12, label %for.cond1.preheader.us
cleanup12: ; preds = %for.cond1.for.inc10_crit_edge.us, %for.body4.us, %entry
%v3 = phi i1 [ false, %entry ], [ true, %for.body4.us ], [ false, %for.cond1.for.inc10_crit_edge.us ]
ret i1 %v3
}
; From https://bugs.llvm.org/show_bug.cgi?id=43687
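; Both successors of the body's conditional branch are the latch block, so the
; loop has no early exit and the compare result is effectively unused; this is a
; regression test checking that such a degenerate shape is simply left alone.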
define void @body_always_branches_to_latch(i16* %a, i16* %b, i32) {
; CHECK-LABEL: @body_always_branches_to_latch(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BODY:%.*]]
; CHECK: body:
; CHECK-NEXT: [[A_DATA:%.*]] = load i16, i16* [[A:%.*]]
; CHECK-NEXT: [[B_DATA:%.*]] = load i16, i16* [[B:%.*]]
; CHECK-NEXT: [[COMPARED_EQUAL:%.*]] = icmp eq i16 [[A_DATA]], [[B_DATA]]
; CHECK-NEXT: br i1 [[COMPARED_EQUAL]], label [[LATCH:%.*]], label [[LATCH]]
; CHECK: latch:
; CHECK-NEXT: [[SHOULD_CONTINUE:%.*]] = icmp slt i32 undef, 79
; CHECK-NEXT: br i1 [[SHOULD_CONTINUE]], label [[BODY]], label [[END:%.*]]
; CHECK: end:
; CHECK-NEXT: ret void
;
entry:
br label %body
body: ; preds = %entry, %latch
%a_data = load i16, i16* %a
%b_data = load i16, i16* %b
%compared_equal = icmp eq i16 %a_data, %b_data
br i1 %compared_equal, label %latch, label %latch
latch: ; preds = %body, %body
%should_continue = icmp slt i32 undef, 79
br i1 %should_continue, label %body, label %end
end: ; preds = %latch
ret void
}

View File

@ -1,137 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-idiom -verify -verify-each -verify-dom-info -verify-loop-info < %s -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; We do not necessarily have a loop that compares two loaded values;
; the loop may already be calling bcmp() itself. We just need to widen it.
; FIXME: -memcpyopt does not promote memcmp() source into memcmp() / bcmp().
; #include <algorithm>
;
; // FIXME
; bool widening_of_bcmp(char const* ptr0, char const* ptr1, size_t count) {
; static constexpr auto Size = 32, Step = 32;
; for(size_t i = 0; i < count; i++, ptr0 += Step, ptr1 += Step) {
; if(bcmp(ptr0, ptr1, Size) != 0)
; return false;
; }
; return true;
; }
;
; // FIXME
; bool widening_of_overlapping_bcmp(char const* ptr0, char const* ptr1, size_t count) {
; static constexpr auto Size = 32, Step = 16;
; for(size_t i = 0; i < count; i++, ptr0 += Step, ptr1 += Step) {
; if(bcmp(ptr0, ptr1, Size) != 0)
; return false;
; }
; return true;
; }
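;
; If such widening were ever implemented, the first case (Step == Size) could in
; principle collapse into one call. A hypothetical sketch, assuming both
; [ptr0, ptr0+count*32) and [ptr1, ptr1+count*32) are known to be dereferenceable
; (the per-chunk loop stops at the first mismatching chunk, so that would have to
; be proven):
;
;   bool widened_bcmp(char const* ptr0, char const* ptr1, size_t count) {
;     return count == 0 || bcmp(ptr0, ptr1, count * 32) == 0;
;   }
;
; In the overlapping case (Step < Size) the windows cover (count-1)*Step + Size
; bytes in total, so a widened call would need that length rather than count*Step.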
declare i32 @bcmp(i8*, i8*, i64)
; FIXME
define i1 @_Z18widening_of_bcmpPKcS0_m(i8* %ptr0, i8* %ptr1, i64 %count) {
; CHECK-LABEL: @_Z18widening_of_bcmpPKcS0_m(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP7:%.*]] = icmp eq i64 [[COUNT:%.*]], 0
; CHECK-NEXT: br i1 [[CMP7]], label [[CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_010:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[PTR1_ADDR_09:%.*]] = phi i8* [ [[ADD_PTR2:%.*]], [[FOR_INC]] ], [ [[PTR1:%.*]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[PTR0_ADDR_08:%.*]] = phi i8* [ [[ADD_PTR:%.*]], [[FOR_INC]] ], [ [[PTR0:%.*]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BCMP:%.*]] = tail call i32 @bcmp(i8* [[PTR0_ADDR_08]], i8* [[PTR1_ADDR_09]], i64 32)
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[BCMP]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_INC]], label [[CLEANUP_LOOPEXIT:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw i64 [[I_010]], 1
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, i8* [[PTR0_ADDR_08]], i64 32
; CHECK-NEXT: [[ADD_PTR2]] = getelementptr inbounds i8, i8* [[PTR1_ADDR_09]], i64 32
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], [[COUNT]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP_LOOPEXIT]]
; CHECK: cleanup.loopexit:
; CHECK-NEXT: [[RES_PH:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: br label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[RES_PH]], [[CLEANUP_LOOPEXIT]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%cmp7 = icmp eq i64 %count, 0
br i1 %cmp7, label %cleanup, label %for.body
for.body: ; preds = %entry, %for.inc
%i.010 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
%ptr1.addr.09 = phi i8* [ %add.ptr2, %for.inc ], [ %ptr1, %entry ]
%ptr0.addr.08 = phi i8* [ %add.ptr, %for.inc ], [ %ptr0, %entry ]
%bcmp = tail call i32 @bcmp(i8* %ptr0.addr.08, i8* %ptr1.addr.09, i64 32)
%cmp1 = icmp eq i32 %bcmp, 0
br i1 %cmp1, label %for.inc, label %cleanup
for.inc: ; preds = %for.body
%inc = add nuw i64 %i.010, 1
%add.ptr = getelementptr inbounds i8, i8* %ptr0.addr.08, i64 32
%add.ptr2 = getelementptr inbounds i8, i8* %ptr1.addr.09, i64 32
%cmp = icmp ult i64 %inc, %count
br i1 %cmp, label %for.body, label %cleanup
cleanup: ; preds = %for.body, %for.inc, %entry
%res = phi i1 [ true, %entry ], [ true, %for.inc ], [ false, %for.body ]
ret i1 %res
}
; FIXME
define i1 @_Z30widening_of_overlapping_bcmpPKcS0_m(i8* %ptr0, i8* %ptr1, i64 %count) {
; CHECK-LABEL: @_Z30widening_of_overlapping_bcmpPKcS0_m(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP7:%.*]] = icmp eq i64 [[COUNT:%.*]], 0
; CHECK-NEXT: br i1 [[CMP7]], label [[CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_010:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[PTR1_ADDR_09:%.*]] = phi i8* [ [[ADD_PTR2:%.*]], [[FOR_INC]] ], [ [[PTR1:%.*]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[PTR0_ADDR_08:%.*]] = phi i8* [ [[ADD_PTR:%.*]], [[FOR_INC]] ], [ [[PTR0:%.*]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BCMP:%.*]] = tail call i32 @bcmp(i8* [[PTR0_ADDR_08]], i8* [[PTR1_ADDR_09]], i64 32)
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[BCMP]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_INC]], label [[CLEANUP_LOOPEXIT:%.*]]
; CHECK: for.inc:
; CHECK-NEXT: [[INC]] = add nuw i64 [[I_010]], 1
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, i8* [[PTR0_ADDR_08]], i64 16
; CHECK-NEXT: [[ADD_PTR2]] = getelementptr inbounds i8, i8* [[PTR1_ADDR_09]], i64 16
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], [[COUNT]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[CLEANUP_LOOPEXIT]]
; CHECK: cleanup.loopexit:
; CHECK-NEXT: [[RES_PH:%.*]] = phi i1 [ false, [[FOR_BODY]] ], [ true, [[FOR_INC]] ]
; CHECK-NEXT: br label [[CLEANUP]]
; CHECK: cleanup:
; CHECK-NEXT: [[RES:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[RES_PH]], [[CLEANUP_LOOPEXIT]] ]
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
%cmp7 = icmp eq i64 %count, 0
br i1 %cmp7, label %cleanup, label %for.body
for.body: ; preds = %entry, %for.inc
%i.010 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
%ptr1.addr.09 = phi i8* [ %add.ptr2, %for.inc ], [ %ptr1, %entry ]
%ptr0.addr.08 = phi i8* [ %add.ptr, %for.inc ], [ %ptr0, %entry ]
%bcmp = tail call i32 @bcmp(i8* %ptr0.addr.08, i8* %ptr1.addr.09, i64 32)
%cmp1 = icmp eq i32 %bcmp, 0
br i1 %cmp1, label %for.inc, label %cleanup
for.inc: ; preds = %for.body
%inc = add nuw i64 %i.010, 1
%add.ptr = getelementptr inbounds i8, i8* %ptr0.addr.08, i64 16
%add.ptr2 = getelementptr inbounds i8, i8* %ptr1.addr.09, i64 16
%cmp = icmp ult i64 %inc, %count
br i1 %cmp, label %for.body, label %cleanup
cleanup: ; preds = %for.body, %for.inc, %entry
%res = phi i1 [ true, %entry ], [ true, %for.inc ], [ false, %for.body ]
ret i1 %res
}