From 9bb6beabf4f4b7815b0e172530fb190a2a04c5ad Mon Sep 17 00:00:00 2001
From: Artur Pilipenko
Date: Wed, 27 Apr 2016 11:00:48 +0000
Subject: [PATCH] isSafeToLoadUnconditionally support queries without a context

This is required to use this function from isSafeToSpeculativelyExecute

Reviewed By: hfinkel

Differential Revision: http://reviews.llvm.org/D16231

llvm-svn: 267692
---
 llvm/include/llvm/Analysis/Loads.h                      |  6 ++++--
 llvm/lib/Analysis/Loads.cpp                             |  9 ++++++---
 .../InstCombine/InstCombineLoadStoreAlloca.cpp          |  4 ++--
 llvm/lib/Transforms/Scalar/SROA.cpp                     |  9 ++++++---
 llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp     | 10 +++++++---
 .../lib/Transforms/Scalar/TailRecursionElimination.cpp  |  3 ++-
 6 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/llvm/include/llvm/Analysis/Loads.h b/llvm/include/llvm/Analysis/Loads.h
index 49d7a222c92e..e5bd0c8d0a1e 100644
--- a/llvm/include/llvm/Analysis/Loads.h
+++ b/llvm/include/llvm/Analysis/Loads.h
@@ -45,13 +45,15 @@ bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
 /// isSafeToLoadUnconditionally - Return true if we know that executing a load
 /// from this value cannot trap.
 ///
-/// If DT is specified this method performs context-sensitive analysis.
+/// If DT and ScanFrom are specified this method performs context-sensitive
+/// analysis and returns true if it is safe to load immediately before ScanFrom.
 ///
 /// If it is not obviously safe to load from the specified pointer, we do a
 /// quick local scan of the basic block containing ScanFrom, to determine if
 /// the address is already accessed.
 bool isSafeToLoadUnconditionally(Value *V, unsigned Align,
-                                 Instruction *ScanFrom,
+                                 const DataLayout &DL,
+                                 Instruction *ScanFrom = nullptr,
                                  const DominatorTree *DT = nullptr,
                                  const TargetLibraryInfo *TLI = nullptr);
 
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index c5d231367331..445e577f8056 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -260,7 +260,8 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
 
 /// \brief Check if executing a load of this pointer value cannot trap.
 ///
-/// If DT is specified this method performs context-sensitive analysis.
+/// If DT and ScanFrom are specified this method performs context-sensitive
+/// analysis and returns true if it is safe to load immediately before ScanFrom.
 ///
 /// If it is not obviously safe to load from the specified pointer, we do
 /// a quick local scan of the basic block containing \c ScanFrom, to determine
@@ -269,11 +270,10 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
 /// This uses the pointee type to determine how many bytes need to be safe to
 /// load from the pointer.
 bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
+                                       const DataLayout &DL,
                                        Instruction *ScanFrom,
                                        const DominatorTree *DT,
                                        const TargetLibraryInfo *TLI) {
-  const DataLayout &DL = ScanFrom->getModule()->getDataLayout();
-
   // Zero alignment means that the load has the ABI alignment for the target
   if (Align == 0)
     Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
@@ -326,6 +326,9 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
     }
   }
 
+  if (!ScanFrom)
+    return false;
+
   // Otherwise, be a little bit aggressive by scanning the local block where we
   // want to check to see if the pointer is already being loaded or stored
   // from/to. If so, the previous load or store would have already trapped,
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 0ee6045aeed8..3b0b6b767e2e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -881,8 +881,8 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
   if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
     // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
     unsigned Align = LI.getAlignment();
-    if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, SI) &&
-        isSafeToLoadUnconditionally(SI->getOperand(2), Align, SI)) {
+    if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
+        isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
       LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                          SI->getOperand(1)->getName()+".val");
       LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 6f65d909131d..5980478010db 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1156,6 +1156,8 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
   if (!HaveLoad)
     return false;
 
+  const DataLayout &DL = PN.getModule()->getDataLayout();
+
   // We can only transform this if it is safe to push the loads into the
   // predecessor blocks. The only thing to watch out for is that we can't put
   // a possibly trapping load in the predecessor if it is a critical edge.
@@ -1177,7 +1179,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
     // If this pointer is always safe to load, or if we can prove that there
     // is already a load in the block, then we can move the load to the pred
     // block.
-    if (isSafeToLoadUnconditionally(InVal, MaxAlign, TI))
+    if (isSafeToLoadUnconditionally(InVal, MaxAlign, DL, TI))
       continue;
 
     return false;
@@ -1245,6 +1247,7 @@ static void speculatePHINodeLoads(PHINode &PN) {
 static bool isSafeSelectToSpeculate(SelectInst &SI) {
   Value *TValue = SI.getTrueValue();
   Value *FValue = SI.getFalseValue();
+  const DataLayout &DL = SI.getModule()->getDataLayout();
 
   for (User *U : SI.users()) {
     LoadInst *LI = dyn_cast<LoadInst>(U);
@@ -1254,9 +1257,9 @@ static bool isSafeSelectToSpeculate(SelectInst &SI) {
     // Both operands to the select need to be dereferencable, either
     // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to it.
-    if (!isSafeToLoadUnconditionally(TValue, LI->getAlignment(), LI))
+    if (!isSafeToLoadUnconditionally(TValue, LI->getAlignment(), DL, LI))
      return false;
-    if (!isSafeToLoadUnconditionally(FValue, LI->getAlignment(), LI))
+    if (!isSafeToLoadUnconditionally(FValue, LI->getAlignment(), DL, LI))
      return false;
   }
 
diff --git a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 5e782692a787..9ff149ae91db 100644
--- a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -1136,6 +1136,8 @@ public:
 /// We can do this to a select if its only uses are loads and if the operand to
 /// the select can be loaded unconditionally.
 static bool isSafeSelectToSpeculate(SelectInst *SI) {
+  const DataLayout &DL = SI->getModule()->getDataLayout();
+
   for (User *U : SI->users()) {
     LoadInst *LI = dyn_cast<LoadInst>(U);
     if (!LI || !LI->isSimple()) return false;
@@ -1143,10 +1145,10 @@ static bool isSafeSelectToSpeculate(SelectInst *SI) {
     // Both operands to the select need to be dereferencable, either absolutely
     // (e.g. allocas) or at this point because we can see other accesses to it.
     if (!isSafeToLoadUnconditionally(SI->getTrueValue(), LI->getAlignment(),
-                                     LI))
+                                     DL, LI))
       return false;
     if (!isSafeToLoadUnconditionally(SI->getFalseValue(), LI->getAlignment(),
-                                     LI))
+                                     DL, LI))
       return false;
   }
 
@@ -1193,6 +1195,8 @@ static bool isSafePHIToSpeculate(PHINode *PN) {
     MaxAlign = std::max(MaxAlign, LI->getAlignment());
   }
 
+  const DataLayout &DL = PN->getModule()->getDataLayout();
+
   // Okay, we know that we have one or more loads in the same block as the PHI.
   // We can transform this if it is safe to push the loads into the predecessor
   // blocks. The only thing to watch out for is that we can't put a possibly
@@ -1217,7 +1221,7 @@ static bool isSafePHIToSpeculate(PHINode *PN) {
 
     // If this pointer is always safe to load, or if we can prove that there is
     // already a load in the block, then we can move the load to the pred block.
-    if (isSafeToLoadUnconditionally(InVal, MaxAlign, Pred->getTerminator()))
+    if (isSafeToLoadUnconditionally(InVal, MaxAlign, DL, Pred->getTerminator()))
       continue;
 
     return false;
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 132bf82a1177..7b6abc0530da 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -454,9 +454,10 @@ bool TailCallElim::CanMoveAboveCall(Instruction *I, CallInst *CI) {
     // does not write to memory and the load provably won't trap.
     // FIXME: Writes to memory only matter if they may alias the pointer
    // being loaded from.
+    const DataLayout &DL = L->getModule()->getDataLayout();
     if (CI->mayWriteToMemory() ||
         !isSafeToLoadUnconditionally(L->getPointerOperand(),
-                                     L->getAlignment(), L))
+                                     L->getAlignment(), DL, L))
      return false;
   }
 }
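
Usage sketch (illustrative only, assuming the post-patch signature above): with the DataLayout passed explicitly and ScanFrom/DT/TLI left at their nullptr defaults, the query can now be made without any context instruction, which is what calling this from isSafeToSpeculativelyExecute requires. The helper canSpeculateLoadWithoutContext below is hypothetical; only isSafeToLoadUnconditionally and the included headers come from LLVM.

// Hypothetical caller (not part of this patch): context-free safety query.
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static bool canSpeculateLoadWithoutContext(LoadInst &LI) {
  // The DataLayout is now supplied by the caller instead of being derived
  // from ScanFrom->getModule() inside isSafeToLoadUnconditionally.
  const DataLayout &DL = LI.getModule()->getDataLayout();
  // With ScanFrom/DT/TLI defaulted, the local block scan is skipped and the
  // patched function conservatively returns false unless the pointer is
  // already known dereferenceable and sufficiently aligned.
  return isSafeToLoadUnconditionally(LI.getPointerOperand(), LI.getAlignment(),
                                     DL);
}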