IR: Change the gep_type_iterator API to avoid always exposing the "current" type.

Instead, expose whether the current type is an array or a struct; if it is an
array, what its upper bound is; and if it is a struct, the struct type itself.
This is in preparation for a later change that will make PointerType derive
from Type rather than SequentialType.
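
To make the shape of the new API concrete, here is a minimal sketch
(illustrative only, not part of this patch) of the usual byte-offset walk over
a GEP's indices written against it; the helper name accumulateConstantOffset
and the assumption that every index is a ConstantInt are invented for the
example:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/GetElementPtrTypeIterator.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Hypothetical helper: accumulate a constant byte offset by asking the
    // iterator whether the outer type is a struct, instead of dereferencing
    // it to obtain the "current" type. Assumes all indices are ConstantInts.
    static int64_t accumulateConstantOffset(const DataLayout &DL,
                                            const GetElementPtrInst *GEP) {
      int64_t Offset = 0;
      for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
           GTI != E; ++GTI) {
        auto *CI = cast<ConstantInt>(GTI.getOperand());
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          // Struct field: add the field's offset from the struct layout.
          Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
        } else {
          // Array/vector index (or the GEP's first, unbounded index): scale
          // the index by the allocation size of the indexed type.
          int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
          Offset += CI->getSExtValue() * ElementSize;
        }
      }
      return Offset;
    }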

Differential Revision: https://reviews.llvm.org/D26594

llvm-svn: 288458
Peter Collingbourne 2016-12-02 02:24:42 +00:00
parent 6afcab3588
commit ab85225be4
31 changed files with 141 additions and 144 deletions

View File

@@ -482,10 +482,7 @@ public:
     int64_t BaseOffset = 0;
     int64_t Scale = 0;
 
-    // Assumes the address space is 0 when Ptr is nullptr.
-    unsigned AS =
-        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
-    auto GTI = gep_type_begin(PointeeType, AS, Operands);
+    auto GTI = gep_type_begin(PointeeType, Operands);
     for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
       // We assume that the cost of Scalar GEP with constant index and the
       // cost of Vector GEP with splat constant index are the same.
@@ -493,7 +490,12 @@ public:
       if (!ConstIdx)
         if (auto Splat = getSplatValue(*I))
           ConstIdx = dyn_cast<ConstantInt>(Splat);
-      if (isa<SequentialType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
+        // For structures the index is always splat or scalar constant
+        assert(ConstIdx && "Unexpected GEP index");
+        uint64_t Field = ConstIdx->getZExtValue();
+        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
+      } else {
         int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
         if (ConstIdx)
           BaseOffset += ConstIdx->getSExtValue() * ElementSize;
@@ -504,17 +506,15 @@ public:
           return TTI::TCC_Basic;
         Scale = ElementSize;
       }
-      } else {
-        StructType *STy = cast<StructType>(*GTI);
-        // For structures the index is always splat or scalar constant
-        assert(ConstIdx && "Unexpected GEP index");
-        uint64_t Field = ConstIdx->getZExtValue();
-        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
-      }
     }
 
+    // Assumes the address space is 0 when Ptr is nullptr.
+    unsigned AS =
+        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
     if (static_cast<T *>(this)->isLegalAddressingMode(
-            PointerType::get(*GTI, AS), const_cast<GlobalValue *>(BaseGV),
+            PointerType::get(Type::getInt8Ty(PointeeType->getContext()), AS),
+            const_cast<GlobalValue *>(BaseGV),
             BaseOffset, HasBaseReg, Scale, AS)) {
       return TTI::TCC_Free;
     }

View File

@@ -16,7 +16,7 @@
 #define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
 
 #include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/User.h"
@@ -33,18 +33,15 @@ namespace llvm {
                               Type *, ptrdiff_t> super;
 
     ItTy OpIt;
-    PointerIntPair<Type *, 1> CurTy;
-    unsigned AddrSpace;
+    PointerUnion<StructType *, Type *> CurTy;
+    enum { Unbounded = -1ull };
+    uint64_t NumElements = Unbounded;
     generic_gep_type_iterator() = default;
 
   public:
-    static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace,
-                                           ItTy It) {
+    static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
       generic_gep_type_iterator I;
-      I.CurTy.setPointer(Ty);
-      I.CurTy.setInt(true);
-      I.AddrSpace = AddrSpace;
+      I.CurTy = Ty;
       I.OpIt = It;
       return I;
     }
@@ -63,34 +60,28 @@ namespace llvm {
       return !operator==(x);
     }
 
-    Type *operator*() const {
-      if (CurTy.getInt())
-        return CurTy.getPointer()->getPointerTo(AddrSpace);
-      return CurTy.getPointer();
-    }
-
+    // FIXME: Make this the iterator's operator*() after the 4.0 release.
+    // operator*() had a different meaning in earlier releases, so we're
+    // temporarily not giving this iterator an operator*() to avoid a subtle
+    // semantics break.
     Type *getIndexedType() const {
-      if (CurTy.getInt())
-        return CurTy.getPointer();
-      CompositeType *CT = cast<CompositeType>(CurTy.getPointer());
-      return CT->getTypeAtIndex(getOperand());
+      if (auto *T = CurTy.dyn_cast<Type *>())
+        return T;
+      return CurTy.get<StructType *>()->getTypeAtIndex(getOperand());
     }
 
-    // This is a non-standard operator->. It allows you to call methods on the
-    // current type directly.
-    Type *operator->() const { return operator*(); }
-
     Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
 
    generic_gep_type_iterator& operator++() {   // Preincrement
-      if (CurTy.getInt()) {
-        CurTy.setInt(false);
-      } else if (CompositeType *CT =
-                     dyn_cast<CompositeType>(CurTy.getPointer())) {
-        CurTy.setPointer(CT->getTypeAtIndex(getOperand()));
-      } else {
-        CurTy.setPointer(nullptr);
-      }
+      Type *Ty = getIndexedType();
+      if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
+        CurTy = ATy->getElementType();
+        NumElements = ATy->getNumElements();
+      } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+        CurTy = VTy->getElementType();
+        NumElements = VTy->getNumElements();
+      } else
+        CurTy = dyn_cast<StructType>(Ty);
       ++OpIt;
      return *this;
    }
@@ -98,6 +89,39 @@ namespace llvm {
     generic_gep_type_iterator operator++(int) { // Postincrement
       generic_gep_type_iterator tmp = *this; ++*this; return tmp;
     }
 
+    // All of the below API is for querying properties of the "outer type", i.e.
+    // the type that contains the indexed type. Most of the time this is just
+    // the type that was visited immediately prior to the indexed type, but for
+    // the first element this is an unbounded array of the GEP's source element
+    // type, for which there is no clearly corresponding IR type (we've
+    // historically used a pointer type as the outer type in this case, but
+    // pointers will soon lose their element type).
+    //
+    // FIXME: Most current users of this class are just interested in byte
+    // offsets (a few need to know whether the outer type is a struct because
+    // they are trying to replace a constant with a variable, which is only
+    // legal for arrays, e.g. canReplaceOperandWithVariable in SimplifyCFG.cpp);
+    // we should provide a more minimal API here that exposes not much more than
+    // that.
+
+    bool isStruct() const { return CurTy.is<StructType *>(); }
+    bool isSequential() const { return CurTy.is<Type *>(); }
+
+    StructType *getStructType() const { return CurTy.get<StructType *>(); }
+
+    StructType *getStructTypeOrNull() const {
+      return CurTy.dyn_cast<StructType *>();
+    }
+
+    bool isBoundedSequential() const {
+      return isSequential() && NumElements != Unbounded;
+    }
+
+    uint64_t getSequentialNumElements() const {
+      assert(isBoundedSequential());
+      return NumElements;
+    }
   };
 
   typedef generic_gep_type_iterator<> gep_type_iterator;
@@ -106,8 +130,6 @@ namespace llvm {
     auto *GEPOp = cast<GEPOperator>(GEP);
     return gep_type_iterator::begin(
         GEPOp->getSourceElementType(),
-        cast<PointerType>(GEPOp->getPointerOperandType()->getScalarType())
-            ->getAddressSpace(),
         GEP->op_begin() + 1);
   }
@@ -119,8 +141,6 @@ namespace llvm {
     auto &GEPOp = cast<GEPOperator>(GEP);
     return gep_type_iterator::begin(
         GEPOp.getSourceElementType(),
-        cast<PointerType>(GEPOp.getPointerOperandType()->getScalarType())
-            ->getAddressSpace(),
         GEP.op_begin() + 1);
   }
@@ -130,13 +150,13 @@ namespace llvm {
   template<typename T>
   inline generic_gep_type_iterator<const T *>
-  gep_type_begin(Type *Op0, unsigned AS, ArrayRef<T> A) {
-    return generic_gep_type_iterator<const T *>::begin(Op0, AS, A.begin());
+  gep_type_begin(Type *Op0, ArrayRef<T> A) {
+    return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
   }
 
   template<typename T>
   inline generic_gep_type_iterator<const T *>
-  gep_type_end(Type * /*Op0*/, unsigned /*AS*/, ArrayRef<T> A) {
+  gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
     return generic_gep_type_iterator<const T *>::end(A.end());
   }
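
As a usage sketch for the outer-type queries added above (again illustrative
rather than part of the patch, reusing the headers from the earlier sketch;
the helper name hasInBoundsConstantIndices is invented): the bounded/unbounded
distinction lets a caller bounds-check indices without ever materializing an
outer type. The iterator's first position is the unbounded array of the
source element type, so isBoundedSequential() is false there and the first
index is skipped naturally:

    // Hypothetical helper: accept only GEPs whose constant indices stay
    // within any bounded sequential (array/vector) outer type.
    static bool hasInBoundsConstantIndices(const GetElementPtrInst *GEP) {
      for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
           GTI != E; ++GTI) {
        auto *CI = dyn_cast<ConstantInt>(GTI.getOperand());
        if (!CI)
          return false; // A variable index: nothing to check statically.
        if (GTI.isBoundedSequential() &&
            CI->getZExtValue() >= GTI.getSequentialNumElements())
          return false; // Constant index past the end of an array or vector.
      }
      return true;
    }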

View File

@@ -217,7 +217,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       if (OpC->getType()->isVectorTy())
         OpC = OpC->getSplatValue();

View File

@@ -412,10 +412,10 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
     // Assume all GEP operands are constants until proven otherwise.
     bool GepHasConstantOffset = true;
     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
-         I != E; ++I) {
+         I != E; ++I, ++GTI) {
       const Value *Index = *I;
       // Compute the (potentially symbolic) offset in bytes for this index.
-      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         // For a struct, add the member offset.
         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
         if (FieldNo == 0)
@@ -431,13 +431,13 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
         if (CIdx->isZero())
           continue;
         Decomposed.OtherOffset +=
-            DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
+            DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
         continue;
       }
 
       GepHasConstantOffset = false;
 
-      uint64_t Scale = DL.getTypeAllocSize(*GTI);
+      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
       unsigned ZExtBits = 0, SExtBits = 0;
 
       // If the integer type is smaller than the pointer size, it is implicitly

View File

@@ -318,7 +318,7 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = DL.getStructLayout(STy);
       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));

View File

@@ -1231,7 +1231,7 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
     gep_type_iterator GTI = gep_type_begin(I);
     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
       Value *Index = I->getOperand(i);
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         // Handle struct member offset arithmetic.
 
         // Handle case when index is vector zeroinitializer
@@ -1730,7 +1730,7 @@ static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
        GTI != GTE; ++GTI) {
     // Struct types are easy -- they must always be indexed by a constant.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = Q.DL.getStructLayout(STy);

View File

@@ -107,11 +107,11 @@ unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
   while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
     // Find the type we're currently indexing into.
     gep_type_iterator GEPTI = gep_type_begin(Gep);
-    std::advance(GEPTI, LastOperand - 1);
+    std::advance(GEPTI, LastOperand - 2);
 
     // If it's a type with the same allocation size as the result of the GEP we
     // can peel off the zero index.
-    if (DL.getTypeAllocSize(*GEPTI) != GEPAllocSize)
+    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
       break;
     --LastOperand;
   }

View File

@@ -3261,7 +3261,7 @@ bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
     int64_t ConstantOffset = 0;
     gep_type_iterator GTI = gep_type_begin(AddrInst);
     for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();

View File

@@ -488,7 +488,7 @@ bool FastISel::selectGetElementPtr(const User *I) {
   for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *StTy = GTI.getStructTypeOrNull()) {
       uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
       if (Field) {
         // N = N + Offset

View File

@@ -3274,7 +3274,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *StTy = GTI.getStructTypeOrNull()) {
       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
       if (Field) {
         // N = N + Offset

View File

@@ -999,7 +999,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
   uint64_t Total = 0;
 
   for (; I != E; ++I) {
-    if (StructType *STy = dyn_cast<StructType>(*I)) {
+    if (StructType *STy = I.getStructTypeOrNull()) {
       const StructLayout *SLO = getDataLayout().getStructLayout(STy);
 
       const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
@@ -1007,7 +1007,6 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
 
       Total += SLO->getElementOffset(Index);
     } else {
-      SequentialType *ST = cast<SequentialType>(*I);
       // Get the index number for the array... which must be long type...
       GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
 
@@ -1020,7 +1019,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
         assert(BitWidth == 64 && "Invalid index type for getelementptr");
         Idx = (int64_t)IdxGV.IntVal.getZExtValue();
       }
-      Total += getDataLayout().getTypeAllocSize(ST->getElementType()) * Idx;
+      Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
     }
   }

View File

@@ -2019,22 +2019,8 @@ static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
 }
 
 /// Test whether a given ConstantInt is in-range for a SequentialType.
-static bool isIndexInRangeOfSequentialType(SequentialType *STy,
-                                           const ConstantInt *CI) {
-  // And indices are valid when indexing along a pointer
-  if (isa<PointerType>(STy))
-    return true;
-
-  uint64_t NumElements = 0;
-  // Determine the number of elements in our sequential type.
-  if (auto *ATy = dyn_cast<ArrayType>(STy))
-    NumElements = ATy->getNumElements();
-  else if (auto *VTy = dyn_cast<VectorType>(STy))
-    NumElements = VTy->getNumElements();
-
-  assert((isa<ArrayType>(STy) || NumElements > 0) &&
-         "didn't expect non-array type to have zero elements!");
+static bool isIndexInRangeOfArrayType(uint64_t NumElements,
+                                      const ConstantInt *CI) {
   // We cannot bounds check the index if it doesn't fit in an int64_t.
   if (CI->getValue().getActiveBits() > 64)
     return false;
@@ -2089,10 +2075,10 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
   // getelementptr instructions into a single instruction.
   //
   if (CE->getOpcode() == Instruction::GetElementPtr) {
-    Type *LastTy = nullptr;
+    gep_type_iterator LastI = gep_type_end(CE);
     for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
          I != E; ++I)
-      LastTy = *I;
+      LastI = I;
 
     // We cannot combine indices if doing so would take us outside of an
     // array or vector. Doing otherwise could trick us if we evaluated such a
@@ -2115,9 +2101,11 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
     bool PerformFold = false;
     if (Idx0->isNullValue())
       PerformFold = true;
-    else if (SequentialType *STy = dyn_cast_or_null<SequentialType>(LastTy))
+    else if (LastI.isSequential())
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
-        PerformFold = isIndexInRangeOfSequentialType(STy, CI);
+        PerformFold =
+            !LastI.isBoundedSequential() ||
+            isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI);
 
     if (PerformFold) {
       SmallVector<Value*, 16> NewIndices;
@@ -2228,7 +2216,10 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
         Unknown = true;
         continue;
       }
-      if (isIndexInRangeOfSequentialType(STy, CI))
+      if (isIndexInRangeOfArrayType(isa<ArrayType>(STy)
+                                        ? cast<ArrayType>(STy)->getNumElements()
+                                        : cast<VectorType>(STy)->getNumElements(),
+                                    CI))
        // It's in range, skip to the next index.
        continue;
      if (!isa<SequentialType>(Prev)) {

View File

@@ -1073,19 +1073,14 @@ bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const {
   gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
   User::const_op_iterator OI = std::next(this->op_begin());
 
-  // Skip the first index, as it has no static limit.
-  ++GEPI;
-  ++OI;
-
   // The remaining indices must be compile-time known integers within the
   // bounds of the corresponding notional static array types.
   for (; GEPI != E; ++GEPI, ++OI) {
     ConstantInt *CI = dyn_cast<ConstantInt>(*OI);
     if (!CI) return false;
-    if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
-      if (CI->getValue().getActiveBits() > 64 ||
-          CI->getZExtValue() >= ATy->getNumElements())
-        return false;
+    if (GEPI.isBoundedSequential() &&
+        (CI->getValue().getActiveBits() > 64 ||
+         CI->getZExtValue() >= GEPI.getSequentialNumElements()))
+      return false;
   }
 
   // All the indices checked out.

View File

@@ -737,15 +737,12 @@ int64_t DataLayout::getIndexedOffsetInType(Type *ElemTy,
                                            ArrayRef<Value *> Indices) const {
   int64_t Result = 0;
 
-  // We can use 0 as the address space as we don't need
-  // to get pointer types back from gep_type_iterator.
-  unsigned AS = 0;
   generic_gep_type_iterator<Value* const*>
-    GTI = gep_type_begin(ElemTy, AS, Indices),
-    GTE = gep_type_end(ElemTy, AS, Indices);
+    GTI = gep_type_begin(ElemTy, Indices),
+    GTE = gep_type_end(ElemTy, Indices);
   for (; GTI != GTE; ++GTI) {
     Value *Idx = GTI.getOperand();
-    if (auto *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       assert(Idx->getType()->isIntegerTy(32) && "Illegal struct idx");
       unsigned FieldNo = cast<ConstantInt>(Idx)->getZExtValue();

View File

@@ -33,7 +33,7 @@ bool GEPOperator::accumulateConstantOffset(const DataLayout &DL,
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));

View File

@@ -557,7 +557,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
   for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
        GTI != E; ++GTI) {
     const Value *Op = GTI.getOperand();
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
@@ -4885,7 +4885,7 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
   for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+    if (auto *StTy = GTI.getStructTypeOrNull()) {
       unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
       // N = N + Offset
       if (Field)

View File

@@ -7157,8 +7157,8 @@ bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
   case Instruction::GetElementPtr: {
     gep_type_iterator GTI = gep_type_begin(Instr);
     auto &DL = Ext->getModule()->getDataLayout();
-    std::advance(GTI, U.getOperandNo());
-    Type *IdxTy = *GTI;
+    std::advance(GTI, U.getOperandNo()-1);
+    Type *IdxTy = GTI.getIndexedType();
     // This extension will end up with a shift because of the scaling factor.
     // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
     // Get the shift amount based on the scaling factor:

View File

@@ -733,7 +733,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
       for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
            i != e; ++i, ++GTI) {
         const Value *Op = *i;
-        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+        if (StructType *STy = GTI.getStructTypeOrNull()) {
           const StructLayout *SL = DL.getStructLayout(STy);
           unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
           TmpOffset += SL->getElementOffset(Idx);

View File

@@ -445,7 +445,7 @@ bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
     for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
          ++i, ++GTI) {
       const Value *Op = *i;
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);

View File

@@ -358,7 +358,7 @@ bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
     for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
          II != IE; ++II, ++GTI) {
       const Value *Op = *II;
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);

View File

@@ -241,7 +241,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
     for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
          GTI != E; ++GTI) {
       const Value *Op = GTI.getOperand();
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);

View File

@@ -936,7 +936,7 @@ redo_gep:
     for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
          i != e; ++i, ++GTI) {
       const Value *Op = *i;
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
        continue;

View File

@@ -371,14 +371,14 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
     ++GEPI; // Skip over the pointer index.
 
     // If this is a use of an array allocation, do a bit more checking for sanity.
-    if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
-      uint64_t NumElements = AT->getNumElements();
+    if (GEPI.isSequential()) {
       ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
 
       // Check to make sure that index falls within the array. If not,
       // something funny is going on, so we won't do the optimization.
       //
-      if (Idx->getZExtValue() >= NumElements)
+      if (GEPI.isBoundedSequential() &&
+          Idx->getZExtValue() >= GEPI.getSequentialNumElements())
         return false;
 
       // We cannot scalar repl this level of the array unless any array
@@ -391,19 +391,13 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
       for (++GEPI; // Skip array index.
           GEPI != E;
           ++GEPI) {
-        uint64_t NumElements;
-        if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
-          NumElements = SubArrayTy->getNumElements();
-        else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
-          NumElements = SubVectorTy->getNumElements();
-        else {
-          assert((*GEPI)->isStructTy() &&
-                 "Indexed GEP type is not array, vector, or struct!");
+        if (GEPI.isStruct())
           continue;
-        }
 
         ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
-        if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
+        if (!IdxVal ||
+            (GEPI.isBoundedSequential() &&
+             IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
          return false;
      }
    }

View File

@@ -517,7 +517,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
       if (CI->isZero()) continue;
 
       // Handle a struct index, which adds its field offset to the pointer.
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
       } else {
         uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
@@ -547,7 +547,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
         if (CI->isZero()) continue;
 
         // Handle a struct index, which adds its field offset to the pointer.
-        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+        if (StructType *STy = GTI.getStructTypeOrNull()) {
          Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
        } else {
          uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());

View File

@@ -1389,7 +1389,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
     for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
          ++I, ++GTI) {
       // Skip indices into struct types.
-      if (isa<StructType>(*GTI))
+      if (GTI.isStruct())
         continue;
 
       // Index type should have the same width as IntPtr
@@ -1546,7 +1546,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
     bool EndsWithSequential = false;
     for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
          I != E; ++I)
-      EndsWithSequential = !(*I)->isStructTy();
+      EndsWithSequential = I.isSequential();
 
     // Can we combine the two pointer arithmetics offsets?
     if (EndsWithSequential) {

View File

@@ -52,7 +52,7 @@ static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
     if (OpC->isZero()) continue; // No offset.
 
     // Handle struct indices, which add their field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

View File

@@ -281,9 +281,10 @@ Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
     return nullptr;
 
   gep_type_iterator GTI = gep_type_begin(*GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (isa<SequentialType>(*GTI++)) {
-      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isSequential()) {
+      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
+                                                  GTI.getIndexedType())) {
        return NewGEP;
      }
    }

View File

@@ -692,7 +692,7 @@ private:
         break;
 
       // Handle a struct index, which adds its field offset to the pointer.
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        unsigned ElementIdx = OpC->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        GEPOffset +=

View File

@@ -722,7 +722,7 @@ bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
   for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
        I != E; ++I, ++GTI) {
     // Skip struct member indices which must be i32.
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       if ((*I)->getType() != IntPtrTy) {
         *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
         Changed = true;
@@ -739,7 +739,7 @@ SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
   int64_t AccumulativeByteOffset = 0;
   gep_type_iterator GTI = gep_type_begin(*GEP);
   for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       // Tries to extract a constant offset from this GEP index.
       int64_t ConstantOffset =
           ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
@@ -752,7 +752,7 @@ SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
             ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
       }
     } else if (LowerGEP) {
-      StructType *StTy = cast<StructType>(*GTI);
+      StructType *StTy = GTI.getStructType();
       uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
       // Skip field 0 as the offset is always 0.
       if (Field != 0) {
@@ -787,7 +787,7 @@ void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
   // Create an ugly GEP for each sequential index. We don't create GEPs for
   // structure indices, as they are accumulated in the constant offset index.
   for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       Value *Idx = Variadic->getOperand(I);
       // Skip zero indices.
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -848,7 +848,7 @@ SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
   // don't create arithmetics for structure indices, as they are accumulated
   // in the constant offset index.
   for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       Value *Idx = Variadic->getOperand(I);
       // Skip zero indices.
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -928,7 +928,7 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
   // handle the constant offset and won't need a new structure index.
   gep_type_iterator GTI = gep_type_begin(*GEP);
   for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
      // Splits this GEP index into a variadic part and a constant offset, and
      // uses the variadic part as the new index.
      Value *OldIdx = GEP->getOperand(I);

View File

@@ -490,8 +490,8 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
     IndexExprs.push_back(SE->getSCEV(*I));
 
   gep_type_iterator GTI = gep_type_begin(GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (!isa<SequentialType>(*GTI++))
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isStruct())
       continue;
 
     const SCEV *OrigIndexExpr = IndexExprs[I - 1];
@@ -501,7 +501,7 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
     // indices except this current one.
     const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
     Value *ArrayIdx = GEP->getOperand(I);
-    uint64_t ElementSize = DL->getTypeAllocSize(*GTI);
+    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
     if (ArrayIdx->getType()->getIntegerBitWidth() <=
         DL->getPointerSizeInBits(GEP->getAddressSpace())) {
       // Skip factoring if ArrayIdx is wider than the pointer size, because

View File

@@ -1416,7 +1416,7 @@ static bool canReplaceOperandWithVariable(const Instruction *I,
     if (OpIdx == 0)
       return true;
     gep_type_iterator It = std::next(gep_type_begin(I), OpIdx - 1);
-    return !It->isStructTy();
+    return It.isSequential();
  }
 }