[ValueTracking] computeOverflowForSignedAdd and isKnownNonNegative

Summary:
Refactor, NFC

Extracts computeOverflowForSignedAdd and isKnownNonNegative from NaryReassociate
into ValueTracking, so that other passes can reuse them.

Reviewers: reames

Subscribers: majnemer, llvm-commits

Differential Revision: http://reviews.llvm.org/D11313

llvm-svn: 245591
Jingyue Wu 2015-08-20 18:27:04 +00:00
parent ed6b9bfeab
commit 10fcea5d4b
3 changed files with 100 additions and 36 deletions
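
Before the diffs, a quick illustration of the new surface area: a minimal,
hypothetical caller (the helper name below is invented; the API it calls is
what this patch adds to ValueTracking):

#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Hypothetical caller: a pass can now ask whether an add provably avoids
// signed wrap instead of re-deriving the answer as NaryReassociate did.
static bool addNeverSignWraps(AddOperator *Add, const DataLayout &DL,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  return computeOverflowForSignedAdd(Add, DL, AC, CxtI, DT) ==
         OverflowResult::NeverOverflows;
}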

include/llvm/Analysis/ValueTracking.h

@@ -20,17 +20,18 @@
#include "llvm/Support/DataTypes.h"
namespace llvm {
class Value;
class Instruction;
class APInt;
class DataLayout;
class StringRef;
class MDNode;
class AddOperator;
class AssumptionCache;
class DataLayout;
class DominatorTree;
class TargetLibraryInfo;
class LoopInfo;
class Instruction;
class Loop;
class LoopInfo;
class MDNode;
class StringRef;
class TargetLibraryInfo;
class Value;
/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
@@ -83,6 +84,12 @@ namespace llvm {
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Returns true if the given value is known to be non-negative.
bool isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be
/// zero for bits that V cannot have.
@@ -309,6 +316,17 @@ namespace llvm {
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
OverflowResult computeOverflowForSignedAdd(Value *LHS, Value *RHS,
const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(AddOperator *Add,
const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next

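An editorial note on the two overloads declared above: the AddOperator form
exists because, given the sum itself, the analysis can also use the add's nsw
flag and the known sign of the sum (for instance, one established via
@llvm.assume). A minimal dispatch sketch, with a hypothetical helper name:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// checkSignedAdd is a hypothetical name. Prefer the AddOperator overload
// whenever the sum already exists as a value; it can only prove more.
static OverflowResult checkSignedAdd(Value *LHS, Value *RHS, Value *Sum,
                                     const DataLayout &DL, AssumptionCache *AC,
                                     const Instruction *CxtI,
                                     const DominatorTree *DT) {
  if (auto *Add = dyn_cast_or_null<AddOperator>(Sum))
    return computeOverflowForSignedAdd(Add, DL, AC, CxtI, DT);
  return computeOverflowForSignedAdd(LHS, RHS, DL, AC, CxtI, DT);
}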
lib/Analysis/ValueTracking.cpp

@@ -185,6 +185,14 @@ bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
return ::isKnownNonZero(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}
bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
bool NonNegative, Negative;
ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
return NonNegative;
}
static bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
unsigned Depth, const Query &Q);
@@ -3370,6 +3378,67 @@ OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
return OverflowResult::MayOverflow;
}
static OverflowResult computeOverflowForSignedAdd(
Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
if (Add && Add->hasNoSignedWrap()) {
return OverflowResult::NeverOverflows;
}
bool LHSKnownNonNegative, LHSKnownNegative;
bool RHSKnownNonNegative, RHSKnownNegative;
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
AC, CxtI, DT);
ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
AC, CxtI, DT);
if ((LHSKnownNonNegative && RHSKnownNegative) ||
(LHSKnownNegative && RHSKnownNonNegative)) {
// The sign bits are opposite: this CANNOT overflow.
return OverflowResult::NeverOverflows;
}
// The remaining code needs Add to be available. Return early if it is not.
if (!Add)
return OverflowResult::MayOverflow;
// If the sign of Add is the same as at least one of the operands, this add
// CANNOT overflow. This is particularly useful when the sum is
// @llvm.assume'ed non-negative rather than proved so from analyzing its
// operands.
bool LHSOrRHSKnownNonNegative =
(LHSKnownNonNegative || RHSKnownNonNegative);
bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
bool AddKnownNonNegative, AddKnownNegative;
ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
/*Depth=*/0, AC, CxtI, DT);
if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
(AddKnownNegative && LHSOrRHSKnownNegative)) {
return OverflowResult::NeverOverflows;
}
}
return OverflowResult::MayOverflow;
}
OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
Add, DL, AC, CxtI, DT);
}
OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
// FIXME: This conservative implementation can be relaxed. E.g. most
// atomic operations are guaranteed to terminate on most platforms

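An editorial aside on why the sign-bit reasoning in computeOverflowForSignedAdd
is sound: a signed add overflows only when both operands share a sign and the
wrapped result has the opposite sign, so a result that matches the sign of
either operand rules overflow out. The claim can be checked exhaustively at
8 bits in standalone C++ (an illustration, not part of the commit):

#include <cassert>
#include <cstdint>

int main() {
  for (int A = -128; A <= 127; ++A) {
    for (int B = -128; B <= 127; ++B) {
      int Exact = A + B;                 // infinitely precise sum
      // i8 add with two's-complement wraparound.
      int8_t Wrapped = static_cast<int8_t>(static_cast<uint8_t>(Exact));
      bool Overflows = Exact < INT8_MIN || Exact > INT8_MAX;
      // A sum sharing the known sign of at least one operand => no overflow.
      if ((Wrapped >= 0 && (A >= 0 || B >= 0)) ||
          (Wrapped < 0 && (A < 0 || B < 0)))
        assert(!Overflows);
    }
  }
  return 0;
}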
lib/Transforms/Scalar/NaryReassociate.cpp

@@ -161,11 +161,6 @@ private:
// GEP's pointer size, i.e., whether Index needs to be sign-extended in order
// to be an index of GEP.
bool requiresSignExtension(Value *Index, GetElementPtrInst *GEP);
// Returns whether V is known to be non-negative at context \c Ctxt.
bool isKnownNonNegative(Value *V, Instruction *Ctxt);
// Returns whether AO may sign overflow at context \c Ctxt. It computes a
// conservative result -- it answers true when not sure.
bool maySignOverflow(AddOperator *AO, Instruction *Ctxt);
AssumptionCache *AC;
const DataLayout *DL;
@@ -352,27 +347,6 @@ bool NaryReassociate::requiresSignExtension(Value *Index,
return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
}
bool NaryReassociate::isKnownNonNegative(Value *V, Instruction *Ctxt) {
bool NonNegative, Negative;
// TODO: ComputeSignBits is expensive. Consider caching the results.
ComputeSignBit(V, NonNegative, Negative, *DL, 0, AC, Ctxt, DT);
return NonNegative;
}
bool NaryReassociate::maySignOverflow(AddOperator *AO, Instruction *Ctxt) {
if (AO->hasNoSignedWrap())
return false;
Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
// If LHS or RHS has the same sign as the sum, AO doesn't sign overflow.
// TODO: handle the negative case as well.
if (isKnownNonNegative(AO, Ctxt) &&
(isKnownNonNegative(LHS, Ctxt) || isKnownNonNegative(RHS, Ctxt)))
return false;
return true;
}
GetElementPtrInst *
NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
Type *IndexedType) {
@@ -381,7 +355,7 @@ NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
IndexToSplit = SExt->getOperand(0);
} else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
// zext can be treated as sext if the source is non-negative.
if (isKnownNonNegative(ZExt->getOperand(0), GEP))
if (isKnownNonNegative(ZExt->getOperand(0), *DL, 0, AC, GEP, DT))
IndexToSplit = ZExt->getOperand(0);
}
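The zext-as-sext fact used in this hunk is easy to spot-check in standalone
C++ (again an illustration, not from the commit): a non-negative value has a
zero sign bit, so zero- and sign-extension fill with the same bits.

#include <cstdint>
// sext i8->i32 is (int32_t)(int8_t)x; zext i8->i32 is (int32_t)(uint8_t)x.
static_assert(int32_t(int8_t(42)) == int32_t(uint8_t(42)),
              "non-negative source: zext equals sext");
static_assert(int32_t(int8_t(-42)) != int32_t(uint8_t(int8_t(-42))),
              "negative source: the extensions differ, hence the guard");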
@@ -389,8 +363,11 @@ NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
// If the I-th index needs sext and the underlying add is not equipped with
// nsw, we cannot split the add because
// sext(LHS + RHS) != sext(LHS) + sext(RHS).
if (requiresSignExtension(IndexToSplit, GEP) && maySignOverflow(AO, GEP))
if (requiresSignExtension(IndexToSplit, GEP) &&
computeOverflowForSignedAdd(AO, *DL, AC, GEP, DT) !=
OverflowResult::NeverOverflows)
return nullptr;
Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
// IndexToSplit = LHS + RHS.
if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
@@ -415,7 +392,7 @@ GetElementPtrInst *NaryReassociate::tryReassociateGEPAtIndex(
IndexExprs.push_back(SE->getSCEV(*Index));
// Replace the I-th index with LHS.
IndexExprs[I] = SE->getSCEV(LHS);
if (isKnownNonNegative(LHS, GEP) &&
if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
DL->getTypeSizeInBits(LHS->getType()) <
DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) {
// Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to