Use Type::isIntOrPtrTy where possible, NFC
It's a bit neater to write `T.isIntOrPtrTy()` than `T.isIntegerTy() || T.isPointerTy()`. I used Python's re.sub with this regex to update users: r'([\w.\->()]+)isIntegerTy\(\)\s*\|\|\s*\1isPointerTy\(\)'. llvm-svn: 336462
This commit is contained in:
parent
2bd02db943
commit
b3091da3af
|
@ -421,24 +421,21 @@ SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
|
|||
/// Build a truncate cast expression. Truncation is only meaningful between
/// integer or pointer types, so both the operand type and the target type
/// must satisfy Type::isIntOrPtrTy().
SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
    : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}
|
||||
|
||||
/// Build a zero-extension cast expression. Both the operand type and the
/// target type must be integer or pointer types (Type::isIntOrPtrTy()).
SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}
|
||||
|
||||
/// Build a sign-extension cast expression. Both the operand type and the
/// target type must be integer or pointer types (Type::isIntOrPtrTy()).
SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}
|
||||
|
||||
|
@ -3699,7 +3696,7 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
|
|||
/// target-specific information.
|
||||
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}
|
||||
|
||||
/// Return the size in bits of the specified type, for which isSCEVable must
|
||||
|
@ -3944,8 +3941,7 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
|
|||
const SCEV *
|
||||
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
|
||||
Type *SrcTy = V->getType();
|
||||
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
|
||||
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
|
||||
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
|
||||
"Cannot truncate or zero extend with non-integer arguments!");
|
||||
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
|
||||
return V; // No conversion
|
||||
|
@ -3958,8 +3954,7 @@ const SCEV *
|
|||
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
|
||||
Type *Ty) {
|
||||
Type *SrcTy = V->getType();
|
||||
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
|
||||
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
|
||||
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
|
||||
"Cannot truncate or zero extend with non-integer arguments!");
|
||||
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
|
||||
return V; // No conversion
|
||||
|
@ -3971,8 +3966,7 @@ ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
|
|||
const SCEV *
|
||||
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
|
||||
Type *SrcTy = V->getType();
|
||||
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
|
||||
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
|
||||
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
|
||||
"Cannot noop or zero extend with non-integer arguments!");
|
||||
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
|
||||
"getNoopOrZeroExtend cannot truncate!");
|
||||
|
@ -3984,8 +3978,7 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
|
|||
const SCEV *
|
||||
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
|
||||
Type *SrcTy = V->getType();
|
||||
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
|
||||
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
|
||||
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
|
||||
"Cannot noop or sign extend with non-integer arguments!");
|
||||
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
|
||||
"getNoopOrSignExtend cannot truncate!");
|
||||
|
@ -3997,8 +3990,7 @@ ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
|
|||
const SCEV *
|
||||
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
|
||||
Type *SrcTy = V->getType();
|
||||
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
|
||||
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
|
||||
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
|
||||
"Cannot noop or any extend with non-integer arguments!");
|
||||
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
|
||||
"getNoopOrAnyExtend cannot truncate!");
|
||||
|
@ -4010,8 +4002,7 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
|
|||
const SCEV *
|
||||
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
|
||||
Type *SrcTy = V->getType();
|
||||
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
|
||||
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
|
||||
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
|
||||
"Cannot truncate or noop with non-integer arguments!");
|
||||
assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
|
||||
"getTruncateOrNoop cannot extend!");
|
||||
|
|
|
@ -1129,7 +1129,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
|
|||
}
|
||||
case Instruction::BitCast: {
|
||||
Type *SrcTy = I->getOperand(0)->getType();
|
||||
if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
|
||||
if (SrcTy->isIntOrPtrTy() &&
|
||||
// TODO: For now, not handling conversions like:
|
||||
// (bitcast i64 %x to <2 x i32>)
|
||||
!I->getType()->isVectorTy()) {
|
||||
|
|
|
@ -3235,7 +3235,7 @@ static bool MightBeFoldableInst(Instruction *I) {
|
|||
// Don't touch identity bitcasts.
|
||||
if (I->getType() == I->getOperand(0)->getType())
|
||||
return false;
|
||||
return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
|
||||
return I->getType()->isIntOrPtrTy();
|
||||
case Instruction::PtrToInt:
|
||||
// PtrToInt is always a noop, as we know that the int type is pointer sized.
|
||||
return true;
|
||||
|
@ -3723,8 +3723,7 @@ bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
|
|||
case Instruction::BitCast:
|
||||
// BitCast is always a noop, and we can handle it as long as it is
|
||||
// int->int or pointer->pointer (we don't want int<->fp or something).
|
||||
if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
|
||||
AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
|
||||
if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
|
||||
// Don't touch identity bitcasts. These were probably put here by LSR,
|
||||
// and we don't want to mess around with them. Assume it knows what it
|
||||
// is doing.
|
||||
|
@ -5349,8 +5348,7 @@ bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
|
|||
// x = phi x1', x2'
|
||||
// y = and x, 0xff
|
||||
bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
|
||||
if (!Load->isSimple() ||
|
||||
!(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy()))
|
||||
if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
|
||||
return false;
|
||||
|
||||
// Skip loads we've already transformed.
|
||||
|
|
|
@ -1553,8 +1553,7 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
|
|||
|
||||
// If the cast is not actually changing bits, and the second operand is a
|
||||
// null pointer, do the comparison with the pre-casted value.
|
||||
if (V2->isNullValue() &&
|
||||
(CE1->getType()->isPointerTy() || CE1->getType()->isIntegerTy())) {
|
||||
if (V2->isNullValue() && CE1->getType()->isIntOrPtrTy()) {
|
||||
if (CE1->getOpcode() == Instruction::ZExt) isSigned = false;
|
||||
if (CE1->getOpcode() == Instruction::SExt) isSigned = true;
|
||||
return evaluateICmpRelation(CE1Op0,
|
||||
|
|
|
@ -3183,8 +3183,7 @@ void Verifier::visitLoadInst(LoadInst &LI) {
|
|||
"Load cannot have Release ordering", &LI);
|
||||
Assert(LI.getAlignment() != 0,
|
||||
"Atomic load must specify explicit alignment", &LI);
|
||||
Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
|
||||
ElTy->isFloatingPointTy(),
|
||||
Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
|
||||
"atomic load operand must have integer, pointer, or floating point "
|
||||
"type!",
|
||||
ElTy, &LI);
|
||||
|
@ -3212,8 +3211,7 @@ void Verifier::visitStoreInst(StoreInst &SI) {
|
|||
"Store cannot have Acquire ordering", &SI);
|
||||
Assert(SI.getAlignment() != 0,
|
||||
"Atomic store must specify explicit alignment", &SI);
|
||||
Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
|
||||
ElTy->isFloatingPointTy(),
|
||||
Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
|
||||
"atomic store operand must have integer, pointer, or floating point "
|
||||
"type!",
|
||||
ElTy, &SI);
|
||||
|
@ -3304,9 +3302,8 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
|
|||
PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
|
||||
Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
|
||||
Type *ElTy = PTy->getElementType();
|
||||
Assert(ElTy->isIntegerTy() || ElTy->isPointerTy(),
|
||||
"cmpxchg operand must have integer or pointer type",
|
||||
ElTy, &CXI);
|
||||
Assert(ElTy->isIntOrPtrTy(),
|
||||
"cmpxchg operand must have integer or pointer type", ElTy, &CXI);
|
||||
checkAtomicMemAccessSize(ElTy, &CXI);
|
||||
Assert(ElTy == CXI.getOperand(1)->getType(),
|
||||
"Expected value type does not match pointer operand type!", &CXI,
|
||||
|
|
|
@ -1337,7 +1337,7 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
|
|||
return;
|
||||
}
|
||||
|
||||
if (ETy->isFloatingPointTy() || ETy->isIntegerTy() || ETy->isPointerTy()) {
|
||||
if (ETy->isFloatingPointTy() || ETy->isIntOrPtrTy()) {
|
||||
O << " .";
|
||||
O << getPTXFundamentalTypeStr(ETy);
|
||||
O << " ";
|
||||
|
|
|
@ -2009,7 +2009,7 @@ void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
|
|||
// Mark the first N int arguments as having reg
|
||||
for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
|
||||
Type *T = Args[Idx].Ty;
|
||||
if (T->isPointerTy() || T->isIntegerTy())
|
||||
if (T->isIntOrPtrTy())
|
||||
if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
|
||||
unsigned numRegs = 1;
|
||||
if (MF->getDataLayout().getTypeAllocSize(T) > 4)
|
||||
|
|
|
@ -437,7 +437,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
|
|||
|
||||
// Are we allowed to form an atomic load or store of this type?
|
||||
/// Return true if \p Ty is a type for which we are allowed to form an atomic
/// load or store: integers, pointers, and floating point types.
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}
|
||||
|
||||
/// Helper to combine a load to a new type.
|
||||
|
|
Loading…
Reference in New Issue