Move Value.isDereferenceablePointer to ValueTracking [NFC]

Move the isDereferenceablePointer function to Analysis. This function recursively tracks dereferenceability over a chain of values, like other functions in ValueTracking.

This refactoring is motivated by further changes to support the dereferenceable_or_null attribute (http://reviews.llvm.org/D8650). isDereferenceablePointer will be extended to perform context-sensitive analysis, and IR is not a good place for such functionality.

Patch by: Artur Pilipenko <apilipenko@azulsystems.com>
Differential Revision: http://reviews.llvm.org/D9075

llvm-svn: 235611
Philip Reames 2015-04-23 17:36:48 +00:00
parent 745615ca00
commit 5461d45abf
9 changed files with 159 additions and 147 deletions
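All call-site changes in the diffs below follow the same mechanical pattern: include llvm/Analysis/ValueTracking.h and call the free function instead of the Value member. A minimal sketch of a caller after the move; the surrounding helper and its name are illustrative, not part of the patch:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Illustrative caller: decide whether a load's pointer operand is known
// dereferenceable, using the relocated analysis entry point.
static bool pointerKnownDereferenceable(LoadInst &LI) {
  const DataLayout &DL = LI.getModule()->getDataLayout();
  Value *Ptr = LI.getPointerOperand();
  // Before this patch: Ptr->isDereferenceablePointer(DL)
  return isDereferenceablePointer(Ptr, DL);
}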


@@ -186,6 +186,13 @@ namespace llvm {
/// are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);
/// isDereferenceablePointer - Return true if this is always a dereferenceable
/// pointer.
///
/// Test if this value is always a pointer to allocated and suitably aligned
/// memory for a simple load or store.
bool isDereferenceablePointer(const Value *V, const DataLayout &DL);
/// isSafeToSpeculativelyExecute - Return true if the instruction does not
/// have any effects besides calculating the result and does not have
/// undefined behavior.


@@ -446,12 +446,6 @@ public:
return const_cast<Value*>(this)->stripInBoundsOffsets();
}
/// \brief Check if this is always a dereferenceable pointer.
///
/// Test if this value is always a pointer to allocated and suitably aligned
/// memory for a simple load or store.
bool isDereferenceablePointer(const DataLayout &DL) const;
/// \brief Translate PHI node to its predecessor from the given basic block.
///
/// If this value is a PHI node with CurBB as its parent, return the value in


@@ -10,6 +10,7 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstIterator.h"
@@ -53,7 +54,7 @@ bool MemDerefPrinter::runOnFunction(Function &F) {
for (auto &I: inst_range(F)) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
Value *PO = LI->getPointerOperand();
if (PO->isDereferenceablePointer(DL))
if (isDereferenceablePointer(PO, DL))
Vec.push_back(PO);
}
}


@@ -31,6 +31,7 @@
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <cstring>
@@ -2807,6 +2808,145 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
return true;
}
static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
Type *Ty, const DataLayout &DL) {
assert(Offset.isNonNegative() && "offset can't be negative");
assert(Ty->isSized() && "must be sized");
APInt DerefBytes(Offset.getBitWidth(), 0);
if (const Argument *A = dyn_cast<Argument>(BV)) {
DerefBytes = A->getDereferenceableBytes();
} else if (auto CS = ImmutableCallSite(BV)) {
DerefBytes = CS.getDereferenceableBytes(0);
}
if (DerefBytes.getBoolValue())
if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
return true;
return false;
}
static bool isDereferenceableFromAttribute(const Value *V,
const DataLayout &DL) {
Type *VTy = V->getType();
Type *Ty = VTy->getPointerElementType();
if (!Ty->isSized())
return false;
APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
return isDereferenceableFromAttribute(V, Offset, Ty, DL);
}
/// Return true if Value is always a dereferenceable pointer.
///
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
SmallPtrSetImpl<const Value *> &Visited) {
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
// These are obviously ok.
if (isa<AllocaInst>(V)) return true;
// It's not always safe to follow a bitcast, for example:
// bitcast i8* (alloca i8) to i32*
// would result in a 4-byte load from a 1-byte alloca. However,
// if we're casting from a pointer from a type of larger size
// to a type of smaller size (or the same size), and the alignment
// is at least as large as for the resulting pointer type, then
// we can look through the bitcast.
if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
Type *STy = BC->getSrcTy()->getPointerElementType(),
*DTy = BC->getDestTy()->getPointerElementType();
if (STy->isSized() && DTy->isSized() &&
(DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
(DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
return isDereferenceablePointer(BC->getOperand(0), DL, Visited);
}
// Global variables which can't collapse to null are ok.
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return !GV->hasExternalWeakLinkage();
// byval arguments are okay.
if (const Argument *A = dyn_cast<Argument>(V))
if (A->hasByValAttr())
return true;
if (isDereferenceableFromAttribute(V, DL))
return true;
// For GEPs, determine if the indexing lands within the allocated object.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// Conservatively require that the base pointer be fully dereferenceable.
if (!Visited.insert(GEP->getOperand(0)).second)
return false;
if (!isDereferenceablePointer(GEP->getOperand(0), DL, Visited))
return false;
// Check the indices.
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::const_op_iterator I = GEP->op_begin()+1,
E = GEP->op_end(); I != E; ++I) {
Value *Index = *I;
Type *Ty = *GTI++;
// Struct indices can't be out of bounds.
if (isa<StructType>(Ty))
continue;
ConstantInt *CI = dyn_cast<ConstantInt>(Index);
if (!CI)
return false;
// Zero is always ok.
if (CI->isZero())
continue;
// Check to see that it's within the bounds of an array.
ArrayType *ATy = dyn_cast<ArrayType>(Ty);
if (!ATy)
return false;
if (CI->getValue().getActiveBits() > 64)
return false;
if (CI->getZExtValue() >= ATy->getNumElements())
return false;
}
// Indices check out; this is dereferenceable.
return true;
}
// For gc.relocate, look through relocations
if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V))
if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) {
GCRelocateOperands RelocateInst(I);
return isDereferenceablePointer(RelocateInst.derivedPtr(), DL, Visited);
}
if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
return isDereferenceablePointer(ASC->getOperand(0), DL, Visited);
// If we don't know, assume the worst.
return false;
}
bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL) {
// When dereferenceability information is provided by a dereferenceable
// attribute, we know exactly how many bytes are dereferenceable. If we can
// determine the exact offset to the attributed variable, we can use that
// information here.
Type *VTy = V->getType();
Type *Ty = VTy->getPointerElementType();
if (Ty->isSized()) {
APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
if (Offset.isNonNegative())
if (isDereferenceableFromAttribute(BV, Offset, Ty, DL))
return true;
}
SmallPtrSet<const Value *, 32> Visited;
return ::isDereferenceablePointer(V, DL, Visited);
}
bool llvm::isSafeToSpeculativelyExecute(const Value *V) {
const Operator *Inst = dyn_cast<Operator>(V);
if (!Inst)
@@ -2854,7 +2994,7 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V) {
LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
return false;
const DataLayout &DL = LI->getModule()->getDataLayout();
return LI->getPointerOperand()->isDereferenceablePointer(DL);
return isDereferenceablePointer(LI->getPointerOperand(), DL);
}
case Instruction::Call: {
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
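In the new ValueTracking.cpp code above, the attribute-driven path reduces to simple arithmetic: strip in-bounds constant offsets from the pointer, then require that the advertised dereferenceable byte count covers the offset plus the store size of the accessed type. A self-contained sketch with illustrative numbers; the IR in the comment is hypothetical, not taken from the patch:

#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  // Suppose %arg carries dereferenceable(8) and the access is
  //   %p = getelementptr inbounds i32, i32* %arg, i64 1
  // stripAndAccumulateInBoundsConstantOffsets yields Offset = 4, and an i32
  // store is 4 bytes, so the check done by isDereferenceableFromAttribute
  // succeeds: 8 >= 4 + 4.
  llvm::APInt DerefBytes(64, 8); // bytes promised by the attribute
  llvm::APInt Offset(64, 4);     // accumulated in-bounds constant offset
  uint64_t StoreSize = 4;        // DL.getTypeStoreSize(i32) on common targets
  assert(DerefBytes.uge(Offset + StoreSize));
  return 0;
}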


@@ -477,137 +477,6 @@ Value *Value::stripInBoundsOffsets() {
return stripPointerCastsAndOffsets<PSK_InBounds>(this);
}
/// \brief Check if Value is always a dereferenceable pointer.
///
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
SmallPtrSetImpl<const Value *> &Visited) {
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
// These are obviously ok.
if (isa<AllocaInst>(V)) return true;
// It's not always safe to follow a bitcast, for example:
// bitcast i8* (alloca i8) to i32*
// would result in a 4-byte load from a 1-byte alloca. However,
// if we're casting from a pointer from a type of larger size
// to a type of smaller size (or the same size), and the alignment
// is at least as large as for the resulting pointer type, then
// we can look through the bitcast.
if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
Type *STy = BC->getSrcTy()->getPointerElementType(),
*DTy = BC->getDestTy()->getPointerElementType();
if (STy->isSized() && DTy->isSized() &&
(DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
(DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
return isDereferenceablePointer(BC->getOperand(0), DL, Visited);
}
// Global variables which can't collapse to null are ok.
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return !GV->hasExternalWeakLinkage();
// byval arguments are okay. Arguments specifically marked as
// dereferenceable are okay too.
if (const Argument *A = dyn_cast<Argument>(V)) {
if (A->hasByValAttr())
return true;
else if (uint64_t Bytes = A->getDereferenceableBytes()) {
Type *Ty = V->getType()->getPointerElementType();
if (Ty->isSized() && DL.getTypeStoreSize(Ty) <= Bytes)
return true;
}
return false;
}
// Return values from call sites specifically marked as dereferenceable are
// also okay.
if (auto CS = ImmutableCallSite(V)) {
if (uint64_t Bytes = CS.getDereferenceableBytes(0)) {
Type *Ty = V->getType()->getPointerElementType();
if (Ty->isSized() && DL.getTypeStoreSize(Ty) <= Bytes)
return true;
}
}
// For GEPs, determine if the indexing lands within the allocated object.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// Conservatively require that the base pointer be fully dereferenceable.
if (!Visited.insert(GEP->getOperand(0)).second)
return false;
if (!isDereferenceablePointer(GEP->getOperand(0), DL, Visited))
return false;
// Check the indices.
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::const_op_iterator I = GEP->op_begin()+1,
E = GEP->op_end(); I != E; ++I) {
Value *Index = *I;
Type *Ty = *GTI++;
// Struct indices can't be out of bounds.
if (isa<StructType>(Ty))
continue;
ConstantInt *CI = dyn_cast<ConstantInt>(Index);
if (!CI)
return false;
// Zero is always ok.
if (CI->isZero())
continue;
// Check to see that it's within the bounds of an array.
ArrayType *ATy = dyn_cast<ArrayType>(Ty);
if (!ATy)
return false;
if (CI->getValue().getActiveBits() > 64)
return false;
if (CI->getZExtValue() >= ATy->getNumElements())
return false;
}
// Indices check out; this is dereferenceable.
return true;
}
// For gc.relocate, look through relocations
if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V))
if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) {
GCRelocateOperands RelocateInst(I);
return isDereferenceablePointer(RelocateInst.derivedPtr(), DL, Visited);
}
if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
return isDereferenceablePointer(ASC->getOperand(0), DL, Visited);
// If we don't know, assume the worst.
return false;
}
bool Value::isDereferenceablePointer(const DataLayout &DL) const {
// When dereferenceability information is provided by a dereferenceable
// attribute, we know exactly how many bytes are dereferenceable. If we can
// determine the exact offset to the attributed variable, we can use that
// information here.
Type *Ty = getType()->getPointerElementType();
if (Ty->isSized()) {
APInt Offset(DL.getTypeStoreSizeInBits(getType()), 0);
const Value *BV = stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
APInt DerefBytes(Offset.getBitWidth(), 0);
if (const Argument *A = dyn_cast<Argument>(BV))
DerefBytes = A->getDereferenceableBytes();
else if (auto CS = ImmutableCallSite(BV))
DerefBytes = CS.getDereferenceableBytes(0);
if (DerefBytes.getBoolValue() && Offset.isNonNegative()) {
if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
return true;
}
}
SmallPtrSet<const Value *, 32> Visited;
return ::isDereferenceablePointer(this, DL, Visited);
}
Value *Value::DoPHITranslation(const BasicBlock *CurBB,
const BasicBlock *PredBB) {
PHINode *PN = dyn_cast<PHINode>(this);


@@ -36,6 +36,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
@@ -321,7 +322,7 @@ static bool AllCallersPassInValidPointerForArgument(Argument *Arg) {
CallSite CS(U);
assert(CS && "Should only have direct calls!");
if (!CS.getArgument(ArgNo)->isDereferenceablePointer(DL))
if (!isDereferenceablePointer(CS.getArgument(ArgNo), DL))
return false;
}
return true;


@@ -1204,7 +1204,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
// isDereferenceablePointer -> deref attribute
if (DerivedPtr->isDereferenceablePointer(DL)) {
if (isDereferenceablePointer(DerivedPtr, DL)) {
if (Argument *A = dyn_cast<Argument>(DerivedPtr)) {
uint64_t Bytes = A->getDereferenceableBytes();
II->addDereferenceableAttr(AttributeSet::ReturnIndex, Bytes);


@@ -1406,7 +1406,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
// If this pointer is always safe to load, or if we can prove that there
// is already a load in the block, then we can move the load to the pred
// block.
if (InVal->isDereferenceablePointer(DL) ||
if (isDereferenceablePointer(InVal, DL) ||
isSafeToLoadUnconditionally(InVal, TI, MaxAlign))
continue;
@@ -1476,8 +1476,8 @@ static bool isSafeSelectToSpeculate(SelectInst &SI) {
Value *TValue = SI.getTrueValue();
Value *FValue = SI.getFalseValue();
const DataLayout &DL = SI.getModule()->getDataLayout();
bool TDerefable = TValue->isDereferenceablePointer(DL);
bool FDerefable = FValue->isDereferenceablePointer(DL);
bool TDerefable = isDereferenceablePointer(TValue, DL);
bool FDerefable = isDereferenceablePointer(FValue, DL);
for (User *U : SI.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);


@@ -1140,8 +1140,8 @@ public:
/// the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst *SI) {
const DataLayout &DL = SI->getModule()->getDataLayout();
bool TDerefable = SI->getTrueValue()->isDereferenceablePointer(DL);
bool FDerefable = SI->getFalseValue()->isDereferenceablePointer(DL);
bool TDerefable = isDereferenceablePointer(SI->getTrueValue(), DL);
bool FDerefable = isDereferenceablePointer(SI->getFalseValue(), DL);
for (User *U : SI->users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
@@ -1228,7 +1228,7 @@ static bool isSafePHIToSpeculate(PHINode *PN) {
// If this pointer is always safe to load, or if we can prove that there is
// already a load in the block, then we can move the load to the pred block.
if (InVal->isDereferenceablePointer(DL) ||
if (isDereferenceablePointer(InVal, DL) ||
isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign))
continue;