Revert the majority of the next patch in the address space series:

r165941: Resubmit the changes to llvm core to update the functions to
         support different pointer sizes on a per address space basis.

Despite this commit log, this change primarily touched code outside of
VMCore, and those changes do not carry any tests for correctness (or
even plausibility), and we have consistently found questionable or flat
out incorrect cases in these changes. Most of them are probably correct,
but we need to devise a system that makes it more clear when we have
handled the address space concerns correctly, and ideally each pass that
gets updated would receive an accompanying test case that exercises that
pass specifically w.r.t. alternate address spaces.

However, from this commit I have retained the new C API entry points.
They are an orthogonal change that probably should have been split
apart, but they seem entirely good.
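
The message does not say which C API entry points were retained. As far as I
can tell they are the per-address-space variants in llvm-c/Target.h, so treat
the names below as an assumption rather than something this commit documents;
the layout string and address space 1 are arbitrary illustrative values. A
minimal sketch of how a C API client might use them next to the existing
address-space-0 query:

    // Hedged sketch: LLVMPointerSizeForAS is assumed to be one of the retained
    // entry points; the layout string is an arbitrary example that gives
    // address space 0 32-bit pointers and address space 1 64-bit pointers.
    #include "llvm-c/Target.h"
    #include <cstdio>

    void printPointerSizes() {
      LLVMTargetDataRef TD =
          LLVMCreateTargetData("e-p:32:32:32-p1:64:64:64");

      std::printf("AS0: %u bytes\n", LLVMPointerSize(TD));         // existing API
      std::printf("AS1: %u bytes\n", LLVMPointerSizeForAS(TD, 1)); // new API

      LLVMDisposeTargetData(TD);
    }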

In several places the changes were very obvious cleanups with no actual
multiple-address-space code added; where I spotted these, I have not
reverted them.

In a few other places there were merge conflicts due to a cleaner
solution being implemented later, often not using address spaces at all.
In those cases, I've preserved the new code which isn't address space
dependent.

This is part of my ongoing effort to clean out the partial address space
code, which carries high risk and low test coverage and is unlikely to be
finished before the 3.2 release, which looms ever closer. Duncan and I
would both like to see the above issues addressed before we return to
these changes.

llvm-svn: 167222
Chandler Carruth 2012-11-01 09:14:31 +00:00
parent 7ec5085e01
commit 5da3f0512e
33 changed files with 92 additions and 124 deletions

View File

@@ -231,7 +231,9 @@ public:
   }
   /// Layout pointer alignment
-  unsigned getPointerABIAlignment(unsigned AS) const {
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerABIAlignment(unsigned AS = 0) const {
     DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
     if (val == Pointers.end()) {
       val = Pointers.find(0);
@@ -239,7 +241,9 @@ public:
     return val->second.ABIAlign;
   }
   /// Return target's alignment for stack-based pointers
-  unsigned getPointerPrefAlignment(unsigned AS) const {
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerPrefAlignment(unsigned AS = 0) const {
     DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
     if (val == Pointers.end()) {
       val = Pointers.find(0);
@@ -247,7 +251,9 @@ public:
     return val->second.PrefAlign;
   }
   /// Layout pointer size
-  unsigned getPointerSize(unsigned AS) const {
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerSize(unsigned AS = 0) const {
     DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
     if (val == Pointers.end()) {
       val = Pointers.find(0);
@@ -255,7 +261,9 @@ public:
     return val->second.TypeBitWidth;
   }
   /// Layout pointer size, in bits
-  unsigned getPointerSizeInBits(unsigned AS) const {
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerSizeInBits(unsigned AS = 0) const {
     return getPointerSize(AS) * 8;
   }
   /// Size examples:
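
To make the retained defaults above concrete, here is a minimal C++ sketch of
how a client might query DataLayout under the defaulted-parameter API shown in
this hunk: callers that never mention an address space keep compiling and
implicitly ask about address space 0, while address-space-aware code passes the
space explicitly and falls back to the address-space-0 description when the
space is unknown. The header path, layout string, and address space 1 are
assumptions for illustration, not anything this commit defines.

    // Sketch only; assumes the LLVM 3.2-era header location and an arbitrary
    // layout string that describes a second pointer type for address space 1.
    #include "llvm/DataLayout.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void reportPointerWidths() {
      DataLayout DL("e-p:32:32:32-p1:64:64:64");

      // Defaulted parameter: identical to DL.getPointerSizeInBits(0).
      outs() << "AS0 pointer bits: " << DL.getPointerSizeInBits() << "\n";

      // Explicit address space; unknown spaces fall back to the AS0 entry,
      // per the Pointers.find(0) lookup in the hunk above.
      outs() << "AS1 pointer bits: " << DL.getPointerSizeInBits(1) << "\n";
    }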

View File

@@ -188,8 +188,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
   bool isInBounds = cast<GEPOperator>(GEP)->isInBounds() && !NoAssumptions;
   // Build a mask for high order bits.
-  unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
-  unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+  unsigned IntPtrWidth = TD.getPointerSizeInBits();
   uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
   for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;

View File

@@ -286,8 +286,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
       V = GEPOp->getOperand(0);
       continue;
     }
-    unsigned AS = GEPOp->getPointerAddressSpace();
     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
     gep_type_iterator GTI = gep_type_begin(GEPOp);
     for (User::const_op_iterator I = GEPOp->op_begin()+1,
@@ -316,7 +315,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
       // If the integer type is smaller than the pointer size, it is implicitly
       // sign extended to pointer size.
       unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
-      if (TD->getPointerSizeInBits(AS) > Width)
+      if (TD->getPointerSizeInBits() > Width)
         Extension = EK_SignExt;
       // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
@@ -345,7 +344,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
       // Make sure that we have a scale that makes sense for this target's
       // pointer size.
-      if (unsigned ShiftBits = 64-TD->getPointerSizeInBits(AS)) {
+      if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
         Scale <<= ShiftBits;
         Scale = (int64_t)Scale >> ShiftBits;
       }

View File

@@ -91,16 +91,14 @@ bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
     // which doesn't contain values outside the range of a pointer.
     if (isa<IntToPtrInst>(CI) && TD &&
         TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
-        Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits(
-          cast<IntToPtrInst>(CI)->getAddressSpace()))
+        Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits())
       return true;
     // A ptrtoint cast is free so long as the result is large enough to store
     // the pointer, and a legal integer type.
     if (isa<PtrToIntInst>(CI) && TD &&
         TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
-        Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits(
-          cast<PtrToIntInst>(CI)->getPointerAddressSpace()))
+        Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits())
       return true;
     // trunc to a native type is free (assuming the target has compare and

View File

@@ -916,11 +916,10 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
     if (TD && CE->getOpcode() == Instruction::IntToPtr) {
       Constant *Input = CE->getOperand(0);
       unsigned InWidth = Input->getType()->getScalarSizeInBits();
-      unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
-      if (TD->getPointerSizeInBits(AS) < InWidth) {
+      if (TD->getPointerSizeInBits() < InWidth) {
         Constant *Mask =
           ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
-                                                  TD->getPointerSizeInBits(AS)));
+                                                  TD->getPointerSizeInBits()));
         Input = ConstantExpr::getAnd(Input, Mask);
       }
       // Do a zext or trunc to get to the dest size.
@@ -933,10 +932,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
     // the int size is >= the ptr size. This requires knowing the width of a
     // pointer, so it can't be done in ConstantExpr::getCast.
     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
-      if (TD && CE->getOpcode() == Instruction::PtrToInt &&
-          TD->getPointerSizeInBits(
-            cast<PointerType>(CE->getOperand(0)->getType())->getAddressSpace())
-          <= CE->getType()->getScalarSizeInBits())
+      if (TD &&
+          TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() &&
+          CE->getOpcode() == Instruction::PtrToInt)
         return FoldBitCast(CE->getOperand(0), DestTy, *TD);
     return ConstantExpr::getCast(Opcode, Ops[0], DestTy);

View File

@@ -243,8 +243,7 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
   if (!TD)
     return false;
-  unsigned AS = GEP.getPointerAddressSpace();
-  unsigned IntPtrWidth = TD->getPointerSizeInBits(AS);
+  unsigned IntPtrWidth = TD->getPointerSizeInBits();
   assert(IntPtrWidth == Offset.getBitWidth());
   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@@ -392,8 +391,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
   // Track base/offset pairs when converted to a plain integer provided the
   // integer is large enough to represent the pointer.
   unsigned IntegerSize = I.getType()->getScalarSizeInBits();
-  unsigned AS = I.getPointerAddressSpace();
-  if (TD && IntegerSize >= TD->getPointerSizeInBits(AS)) {
+  if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
     std::pair<Value *, APInt> BaseAndOffset
       = ConstantOffsetPtrs.lookup(I.getOperand(0));
     if (BaseAndOffset.first)
@@ -427,8 +425,7 @@ bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
   // modifications provided the integer is not too large.
   Value *Op = I.getOperand(0);
   unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
-  unsigned AS = I.getAddressSpace();
-  if (TD && IntegerSize <= TD->getPointerSizeInBits(AS)) {
+  if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
     std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
     if (BaseAndOffset.first)
       ConstantOffsetPtrs[&I] = BaseAndOffset;
@@ -763,8 +760,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
   if (!TD || !V->getType()->isPointerTy())
     return 0;
-  unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();;
-  unsigned IntPtrWidth = TD->getPointerSizeInBits(AS);
+  unsigned IntPtrWidth = TD->getPointerSizeInBits();
   APInt Offset = APInt::getNullValue(IntPtrWidth);
   // Even though we don't look through PHI nodes, we could be called on an
@@ -828,8 +824,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
       // size of the byval type by the target's pointer size.
       PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
       unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
-      unsigned AS = PTy->getAddressSpace();
-      unsigned PointerSize = TD->getPointerSizeInBits(AS);
+      unsigned PointerSize = TD->getPointerSizeInBits();
       // Ceiling division.
       unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

View File

@@ -666,8 +666,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
 /// 'Offset' APInt must be the bitwidth of the target's pointer size.
 static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
                                 APInt &Offset) {
-  unsigned AS = GEP->getPointerAddressSpace();
-  unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+  unsigned IntPtrWidth = TD.getPointerSizeInBits();
   assert(IntPtrWidth == Offset.getBitWidth());
   gep_type_iterator GTI = gep_type_begin(GEP);
@@ -697,14 +696,12 @@ static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
 /// accumulates the total constant offset applied in the returned constant. It
 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
 /// no constant offsets applied.
-/// FIXME: This function also exists in InlineCost.cpp.
 static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
                                                 Value *&V) {
   if (!V->getType()->isPointerTy())
     return 0;
-  unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();;
-  unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+  unsigned IntPtrWidth = TD.getPointerSizeInBits();
   APInt Offset = APInt::getNullValue(IntPtrWidth);
   // Even though we don't look through PHI nodes, we could be called on an
@@ -1880,9 +1877,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
     // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
     // if the integer type is the same size as the pointer type.
     if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
-        Q.TD->getPointerSizeInBits(
-          cast<PtrToIntInst>(LI)->getPointerAddressSpace()) ==
-        DstTy->getPrimitiveSizeInBits()) {
+        Q.TD->getPointerSizeInBits() == DstTy->getPrimitiveSizeInBits()) {
       if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
         // Transfer the cast to the constant.
         if (Value *V = SimplifyICmpInst(Pred, SrcOp,

View File

@@ -40,8 +40,7 @@ static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
   if (unsigned BitWidth = Ty->getScalarSizeInBits())
     return BitWidth;
   assert(isa<PointerType>(Ty) && "Expected a pointer type!");
-  return TD ?
-    TD->getPointerSizeInBits(cast<PointerType>(Ty)->getAddressSpace()) : 0;
+  return TD ? TD->getPointerSizeInBits() : 0;
 }
 static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
@@ -1620,8 +1619,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
   // Re-sign extend from the pointer size if needed to get overflow edge cases
   // right.
-  unsigned AS = GEP->getPointerAddressSpace();
-  unsigned PtrSize = TD.getPointerSizeInBits(AS);
+  unsigned PtrSize = TD.getPointerSizeInBits();
   if (PtrSize < 64)
     Offset = SignExtend64(Offset, PtrSize);

View File

@@ -385,8 +385,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
     // - __tlv_bootstrap - used to make sure support exists
     // - spare pointer, used when mapped by the runtime
     // - pointer to mangled symbol above with initializer
-    unsigned AS = GV->getType()->getAddressSpace();
-    unsigned PtrSize = TD->getPointerSizeInBits(AS)/8;
+    unsigned PtrSize = TD->getPointerSizeInBits()/8;
     OutStreamer.EmitSymbolValue(GetExternalSymbolSymbol("_tlv_bootstrap"),
                                 PtrSize, 0);
     OutStreamer.EmitIntValue(0, PtrSize, 0);
@@ -1300,7 +1299,7 @@ void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) {
   // Emit the function pointers in the target-specific order
   const DataLayout *TD = TM.getDataLayout();
-  unsigned Align = Log2_32(TD->getPointerPrefAlignment(0));
+  unsigned Align = Log2_32(TD->getPointerPrefAlignment());
   std::stable_sort(Structors.begin(), Structors.end(), priority_order);
   for (unsigned i = 0, e = Structors.size(); i != e; ++i) {
     const MCSection *OutputSection =
@@ -1481,9 +1480,8 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
     if (Offset == 0)
       return Base;
-    unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
     // Truncate/sext the offset to the pointer size.
-    unsigned Width = TD.getPointerSizeInBits(AS);
+    unsigned Width = TD.getPointerSizeInBits();
     if (Width < 64)
       Offset = SignExtend64(Offset, Width);

View File

@@ -112,7 +112,7 @@ unsigned AsmPrinter::GetSizeOfEncodedValue(unsigned Encoding) const {
   switch (Encoding & 0x07) {
   default: llvm_unreachable("Invalid encoded value.");
-  case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize(0);
+  case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize();
   case dwarf::DW_EH_PE_udata2: return 2;
   case dwarf::DW_EH_PE_udata4: return 4;
   case dwarf::DW_EH_PE_udata8: return 8;

View File

@@ -200,7 +200,7 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const {
   case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return;
   case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return;
   case dwarf::DW_FORM_addr:
-    Size = Asm->getDataLayout().getPointerSize(0); break;
+    Size = Asm->getDataLayout().getPointerSize(); break;
   default: llvm_unreachable("DIE Value form not supported yet");
   }
   Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/);
@@ -222,7 +222,7 @@ unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const {
   case dwarf::DW_FORM_data8: return sizeof(int64_t);
   case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
   case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
-  case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize(0);
+  case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize();
   default: llvm_unreachable("DIE Value form not supported yet");
   }
 }
@@ -249,7 +249,7 @@ void DIELabel::EmitValue(AsmPrinter *AP, unsigned Form) const {
 unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const {
   if (Form == dwarf::DW_FORM_data4) return 4;
   if (Form == dwarf::DW_FORM_strp) return 4;
-  return AP->getDataLayout().getPointerSize(0);
+  return AP->getDataLayout().getPointerSize();
 }
 #ifndef NDEBUG
@@ -273,7 +273,7 @@ void DIEDelta::EmitValue(AsmPrinter *AP, unsigned Form) const {
 unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const {
   if (Form == dwarf::DW_FORM_data4) return 4;
   if (Form == dwarf::DW_FORM_strp) return 4;
-  return AP->getDataLayout().getPointerSize(0);
+  return AP->getDataLayout().getPointerSize();
 }
 #ifndef NDEBUG

View File

@@ -384,7 +384,7 @@ DIE *DwarfDebug::constructLexicalScopeDIE(CompileUnit *TheCU,
     // DW_AT_ranges appropriately.
     TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
                    DebugRangeSymbols.size()
-                   * Asm->getDataLayout().getPointerSize(0));
+                   * Asm->getDataLayout().getPointerSize());
     for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
          RE = Ranges.end(); RI != RE; ++RI) {
       DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -450,7 +450,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
     // DW_AT_ranges appropriately.
     TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
                    DebugRangeSymbols.size()
-                   * Asm->getDataLayout().getPointerSize(0));
+                   * Asm->getDataLayout().getPointerSize());
     for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
          RE = Ranges.end(); RI != RE; ++RI) {
       DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -1765,7 +1765,7 @@ void DwarfDebug::emitDebugInfo() {
     Asm->EmitSectionOffset(Asm->GetTempSymbol("abbrev_begin"),
                            DwarfAbbrevSectionSym);
     Asm->OutStreamer.AddComment("Address Size (in bytes)");
-    Asm->EmitInt8(Asm->getDataLayout().getPointerSize(0));
+    Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
     emitDIE(Die);
     Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("info_end", TheCU->getID()));
@@ -1811,14 +1811,14 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) {
   Asm->EmitInt8(0);
   Asm->OutStreamer.AddComment("Op size");
-  Asm->EmitInt8(Asm->getDataLayout().getPointerSize(0) + 1);
+  Asm->EmitInt8(Asm->getDataLayout().getPointerSize() + 1);
   Asm->OutStreamer.AddComment("DW_LNE_set_address");
   Asm->EmitInt8(dwarf::DW_LNE_set_address);
   Asm->OutStreamer.AddComment("Section end label");
   Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd),
-                                   Asm->getDataLayout().getPointerSize(0),
+                                   Asm->getDataLayout().getPointerSize(),
                                    0/*AddrSpace*/);
   // Mark end of matrix.
@@ -2047,7 +2047,7 @@ void DwarfDebug::emitDebugLoc() {
   // Start the dwarf loc section.
   Asm->OutStreamer.SwitchSection(
     Asm->getObjFileLowering().getDwarfLocSection());
-  unsigned char Size = Asm->getDataLayout().getPointerSize(0);
+  unsigned char Size = Asm->getDataLayout().getPointerSize();
   Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0));
   unsigned index = 1;
   for (SmallVector<DotDebugLocEntry, 4>::iterator
@@ -2144,7 +2144,7 @@ void DwarfDebug::emitDebugRanges() {
   // Start the dwarf ranges section.
   Asm->OutStreamer.SwitchSection(
     Asm->getObjFileLowering().getDwarfRangesSection());
-  unsigned char Size = Asm->getDataLayout().getPointerSize(0);
+  unsigned char Size = Asm->getDataLayout().getPointerSize();
   for (SmallVector<const MCSymbol *, 8>::iterator
          I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end();
        I != E; ++I) {
@@ -2202,7 +2202,7 @@ void DwarfDebug::emitDebugInlineInfo() {
   Asm->OutStreamer.AddComment("Dwarf Version");
   Asm->EmitInt16(dwarf::DWARF_VERSION);
   Asm->OutStreamer.AddComment("Address Size (in bytes)");
-  Asm->EmitInt8(Asm->getDataLayout().getPointerSize(0));
+  Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
   for (SmallVector<const MDNode *, 4>::iterator I = InlinedSPNodes.begin(),
          E = InlinedSPNodes.end(); I != E; ++I) {
@@ -2233,7 +2233,7 @@ void DwarfDebug::emitDebugInlineInfo() {
     if (Asm->isVerbose()) Asm->OutStreamer.AddComment("low_pc");
     Asm->OutStreamer.EmitSymbolValue(LI->first,
-                                     Asm->getDataLayout().getPointerSize(0),0);
+                                     Asm->getDataLayout().getPointerSize(),0);
   }
 }

View File

@@ -417,7 +417,7 @@ void DwarfException::EmitExceptionTable() {
     // that we're omitting that bit.
     TTypeEncoding = dwarf::DW_EH_PE_omit;
     // dwarf::DW_EH_PE_absptr
-    TypeFormatSize = Asm->getDataLayout().getPointerSize(0);
+    TypeFormatSize = Asm->getDataLayout().getPointerSize();
   } else {
     // Okay, we have actual filters or typeinfos to emit. As such, we need to
     // pick a type encoding for them. We're about to emit a list of pointers to

View File

@@ -91,7 +91,7 @@ void OcamlGCMetadataPrinter::beginAssembly(AsmPrinter &AP) {
 /// either condition is detected in a function which uses the GC.
 ///
 void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) {
-  unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize(0);
+  unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
   AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection());
   EmitCamlGlobal(getModule(), AP, "code_end");

View File

@@ -550,7 +550,7 @@ unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
   // address of a block, in which case it is the pointer size.
   switch (getEntryKind()) {
   case MachineJumpTableInfo::EK_BlockAddress:
-    return TD.getPointerSize(0);
+    return TD.getPointerSize();
   case MachineJumpTableInfo::EK_GPRel64BlockAddress:
     return 8;
   case MachineJumpTableInfo::EK_GPRel32BlockAddress:
@@ -570,7 +570,7 @@ unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
   // alignment.
   switch (getEntryKind()) {
   case MachineJumpTableInfo::EK_BlockAddress:
-    return TD.getPointerABIAlignment(0);
+    return TD.getPointerABIAlignment();
   case MachineJumpTableInfo::EK_GPRel64BlockAddress:
     return TD.getABIIntegerTypeAlignment(64);
   case MachineJumpTableInfo::EK_GPRel32BlockAddress:

View File

@@ -3436,12 +3436,9 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
   EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
                                    IsZeroVal, MemcpyStrSrc,
                                    DAG.getMachineFunction());
-  Type *vtType = VT.isExtended() ? VT.getTypeForEVT(*DAG.getContext()) : NULL;
-  unsigned AS = (vtType && vtType->isPointerTy()) ?
-    cast<PointerType>(vtType)->getAddressSpace() : 0;
   if (VT == MVT::Other) {
-    if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
+    if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
         TLI.allowsUnalignedMemoryAccesses(VT)) {
       VT = TLI.getPointerTy();
     } else {

View File

@@ -77,9 +77,9 @@ void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer,
                              Flags,
                              SectionKind::getDataRel(),
                              0, Label->getName());
-  unsigned Size = TM.getDataLayout()->getPointerSize(0);
+  unsigned Size = TM.getDataLayout()->getPointerSize();
   Streamer.SwitchSection(Sec);
-  Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment(0));
+  Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment());
   Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject);
   const MCExpr *E = MCConstantExpr::Create(Size, getContext());
   Streamer.EmitELFSize(Label, E);

View File

@@ -17,7 +17,6 @@
 #include "llvm/Constants.h"
 #include "llvm/DerivedTypes.h"
-#include "llvm/Instructions.h"
 #include "llvm/Module.h"
 #include "llvm/ExecutionEngine/GenericValue.h"
 #include "llvm/ADT/SmallString.h"
@@ -268,7 +267,7 @@ public:
 void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
                        const std::vector<std::string> &InputArgv) {
   clear(); // Free the old contents.
-  unsigned PtrSize = EE->getDataLayout()->getPointerSize(0);
+  unsigned PtrSize = EE->getDataLayout()->getPointerSize();
   Array = new char[(InputArgv.size()+1)*PtrSize];
   DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n");
@@ -343,7 +342,7 @@ void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
 #ifndef NDEBUG
 /// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
 static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
-  unsigned PtrSize = EE->getDataLayout()->getPointerSize(0);
+  unsigned PtrSize = EE->getDataLayout()->getPointerSize();
   for (unsigned i = 0; i < PtrSize; ++i)
     if (*(i + (uint8_t*)Loc))
       return false;

View File

@@ -1054,8 +1054,7 @@ GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
   GenericValue Dest, Src = getOperandValue(SrcVal, SF);
   assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction");
-  unsigned AS = cast<PointerType>(DstTy)->getAddressSpace();
-  uint32_t PtrSize = TD.getPointerSizeInBits(AS);
+  uint32_t PtrSize = TD.getPointerSizeInBits();
   if (PtrSize != Src.IntVal.getBitWidth())
     Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);

View File

@@ -376,7 +376,7 @@ GenericValue lle_X_sprintf(FunctionType *FT,
       case 'x': case 'X':
         if (HowLong >= 1) {
           if (HowLong == 1 &&
-              TheInterpreter->getDataLayout()->getPointerSizeInBits(0) == 64 &&
+              TheInterpreter->getDataLayout()->getPointerSizeInBits() == 64 &&
               sizeof(long) < sizeof(int64_t)) {
             // Make sure we use %lld with a 64 bit argument because we might be
             // compiling LLI on a 32 bit compiler.

View File

@@ -14,9 +14,7 @@
 #include "JIT.h"
 #include "JITDwarfEmitter.h"
-#include "llvm/DerivedTypes.h"
 #include "llvm/Function.h"
-#include "llvm/GlobalVariable.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/CodeGen/JITCodeEmitter.h"
 #include "llvm/CodeGen/MachineFunction.h"
@@ -68,7 +66,7 @@ unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
 void
 JITDwarfEmitter::EmitFrameMoves(intptr_t BaseLabelPtr,
                                 const std::vector<MachineMove> &Moves) const {
-  unsigned PointerSize = TD->getPointerSize(0);
+  unsigned PointerSize = TD->getPointerSize();
   int stackGrowth = stackGrowthDirection == TargetFrameLowering::StackGrowsUp ?
           PointerSize : -PointerSize;
   MCSymbol *BaseLabel = 0;
@@ -380,7 +378,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
   for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
     SizeSites += MCAsmInfo::getULEB128Size(CallSites[i].Action);
-  unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize(0);
+  unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();
   unsigned TypeOffset = sizeof(int8_t) + // Call site format
                         // Call-site table length
@@ -456,12 +454,12 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
     const GlobalVariable *GV = TypeInfos[M - 1];
     if (GV) {
-      if (TD->getPointerSize(GV->getType()->getAddressSpace()) == sizeof(int32_t))
+      if (TD->getPointerSize() == sizeof(int32_t))
        JCE->emitInt32((intptr_t)Jit.getOrEmitGlobalVariable(GV));
      else
        JCE->emitInt64((intptr_t)Jit.getOrEmitGlobalVariable(GV));
    } else {
-      if (TD->getPointerSize(0) == sizeof(int32_t))
+      if (TD->getPointerSize() == sizeof(int32_t))
        JCE->emitInt32(0);
      else
        JCE->emitInt64(0);
@@ -483,7 +481,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
 unsigned char*
 JITDwarfEmitter::EmitCommonEHFrame(const Function* Personality) const {
-  unsigned PointerSize = TD->getPointerSize(0);
+  unsigned PointerSize = TD->getPointerSize();
   int stackGrowth = stackGrowthDirection == TargetFrameLowering::StackGrowsUp ?
           PointerSize : -PointerSize;
@@ -543,7 +541,7 @@ JITDwarfEmitter::EmitEHFrame(const Function* Personality,
                              unsigned char* StartFunction,
                              unsigned char* EndFunction,
                              unsigned char* ExceptionTable) const {
-  unsigned PointerSize = TD->getPointerSize(0);
+  unsigned PointerSize = TD->getPointerSize();
   // EH frame header.
   unsigned char* StartEHPtr = (unsigned char*)JCE->getCurrentPCValue();

View File

@@ -881,7 +881,7 @@ MSP430TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
   if (ReturnAddrIndex == 0) {
     // Set up a frame object for the return address.
-    uint64_t SlotSize = TD->getPointerSize(0);
+    uint64_t SlotSize = TD->getPointerSize();
     ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
                                                            true);
     FuncInfo->setRAIndex(ReturnAddrIndex);
@@ -901,7 +901,7 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
   if (Depth > 0) {
     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
     SDValue Offset =
-      DAG.getConstant(TD->getPointerSize(0), MVT::i16);
+      DAG.getConstant(TD->getPointerSize(), MVT::i16);
     return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                        DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                    FrameAddr, Offset),

View File

@@ -126,10 +126,8 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
       return Base;
     // Truncate/sext the offset to the pointer size.
-    unsigned AS = PtrVal->getType()->isPointerTy() ?
-      cast<PointerType>(PtrVal->getType())->getAddressSpace() : 0;
-    if (TD.getPointerSizeInBits(AS) != 64) {
-      int SExtAmount = 64-TD.getPointerSizeInBits(AS);
+    if (TD.getPointerSizeInBits() != 64) {
+      int SExtAmount = 64-TD.getPointerSizeInBits();
       Offset = (Offset << SExtAmount) >> SExtAmount;
     }
@@ -1380,7 +1378,7 @@ getOpenCLAlignment(const DataLayout *TD,
   const FunctionType *FTy = dyn_cast<FunctionType>(Ty);
   if (FTy)
-    return TD->getPointerPrefAlignment(0);
+    return TD->getPointerPrefAlignment();
   return TD->getPrefTypeAlignment(Ty);
 }

View File

@@ -443,7 +443,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
 bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
   const DataLayout *TD = TM.getDataLayout();
-  bool isPPC64 = TD->getPointerSizeInBits(0) == 64;
+  bool isPPC64 = TD->getPointerSizeInBits() == 64;
   if (isPPC64 && !TOC.empty()) {
     const MCSectionELF *Section = OutStreamer.getContext().getELFSection(".toc",
@@ -549,7 +549,7 @@ static MCSymbol *GetAnonSym(MCSymbol *Sym, MCContext &Ctx) {
 void PPCDarwinAsmPrinter::
 EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
-  bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits(0) == 64;
+  bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
   const TargetLoweringObjectFileMachO &TLOFMacho =
     static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering());
@@ -644,7 +644,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
 bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
-  bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits(0) == 64;
+  bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
   // Darwin/PPC always uses mach-o.
   const TargetLoweringObjectFileMachO &TLOFMacho =

View File

@@ -498,7 +498,7 @@ PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
   } else if (CRSpillFrameIdx) {
     FrameIdx = CRSpillFrameIdx;
   } else {
-    MachineFrameInfo *MFI = (const_cast<MachineFunction &>(MF)).getFrameInfo();
+    MachineFrameInfo *MFI = ((MachineFunction &)MF).getFrameInfo();
     FrameIdx = MFI->CreateFixedObject((uint64_t)4, (int64_t)-4, true);
     CRSpillFrameIdx = FrameIdx;
   }

View File

@@ -692,7 +692,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
       for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
         OutStreamer.EmitLabel(Stubs[i].first);
         OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(),
-                                    TD->getPointerSize(0), 0);
+                                    TD->getPointerSize(), 0);
       }
       Stubs.clear();
     }

View File

@@ -7824,7 +7824,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
     IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
                       false, false, false, 0);
-    SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize(0)),
+    SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
                                     getPointerTy());
     IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);

View File

@@ -1296,16 +1296,15 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
   // If the source integer type is not the intptr_t type for this target, do a
   // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
   // cast to be exposed to other transforms.
-  unsigned AS = CI.getAddressSpace();
   if (TD) {
     if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
-        TD->getPointerSizeInBits(AS)) {
+        TD->getPointerSizeInBits()) {
       Value *P = Builder->CreateTrunc(CI.getOperand(0),
                                       TD->getIntPtrType(CI.getContext()));
       return new IntToPtrInst(P, CI.getType());
     }
     if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
-        TD->getPointerSizeInBits(AS)) {
+        TD->getPointerSizeInBits()) {
       Value *P = Builder->CreateZExt(CI.getOperand(0),
                                      TD->getIntPtrType(CI.getContext()));
       return new IntToPtrInst(P, CI.getType());
@@ -1372,14 +1371,13 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
   // If the destination integer type is not the intptr_t type for this target,
   // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
   // to be exposed to other transforms.
-  unsigned AS = CI.getPointerAddressSpace();
   if (TD) {
-    if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits(AS)) {
+    if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
      Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
                                         TD->getIntPtrType(CI.getContext()));
      return new TruncInst(P, CI.getType());
    }
-    if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits(AS)) {
+    if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits()) {
      Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
                                         TD->getIntPtrType(CI.getContext()));
      return new ZExtInst(P, CI.getType());

View File

@@ -365,12 +365,11 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
   // order the state machines in complexity of the generated code.
   Value *Idx = GEP->getOperand(2);
-  unsigned AS = GEP->getPointerAddressSpace();
   // If the index is larger than the pointer size of the target, truncate the
   // index down like the GEP would do implicitly. We don't have to do this for
   // an inbounds GEP because the index can't be out of range.
   if (!GEP->isInBounds() &&
-      Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits(AS))
+      Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits())
     Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext()));
   // If the comparison is only true for one or two elements, emit direct
@@ -529,11 +528,10 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
     }
   }
-  unsigned AS = cast<GetElementPtrInst>(GEP)->getPointerAddressSpace();
   // Okay, we know we have a single variable index, which must be a
   // pointer/array/vector index. If there is no offset, life is simple, return
   // the index.
-  unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+  unsigned IntPtrWidth = TD.getPointerSizeInBits();
   if (Offset == 0) {
     // Cast to intptrty in case a truncation occurs. If an extension is needed,
     // we don't need to bother extending: the extension won't affect where the
@@ -1554,8 +1552,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
   // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
   // integer type is the same size as the pointer type.
   if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
-      TD->getPointerSizeInBits(
-        cast<PtrToIntInst>(LHSCI)->getPointerAddressSpace()) ==
+      TD->getPointerSizeInBits() ==
       cast<IntegerType>(DestTy)->getBitWidth()) {
     Value *RHSOp = 0;
     if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {

View File

@@ -704,7 +704,7 @@ bool AddressSanitizer::doInitialization(Module &M) {
   BL.reset(new BlackList(ClBlackListFile));
   C = &(M.getContext());
-  LongSize = TD->getPointerSizeInBits(0);
+  LongSize = TD->getPointerSizeInBits();
   IntptrTy = Type::getIntNTy(*C, LongSize);
   IntptrPtrTy = PointerType::get(IntptrTy, 0);

View File

@@ -174,11 +174,10 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
   // this width can be stored. If so, check to see whether we will end up
   // actually reducing the number of stores used.
   unsigned Bytes = unsigned(End-Start);
-  unsigned AS = cast<StoreInst>(TheStores[0])->getPointerAddressSpace();
-  unsigned NumPointerStores = Bytes/TD.getPointerSize(AS);
+  unsigned NumPointerStores = Bytes/TD.getPointerSize();
   // Assume the remaining bytes if any are done a byte at a time.
-  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize(AS);
+  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();
   // If we will reduce the # stores (according to this heuristic), do the
   // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32

View File

@@ -444,7 +444,6 @@ protected:
   bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
     GEPOffset = Offset;
-    unsigned int AS = GEPI.getPointerAddressSpace();
     for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
          GTI != GTE; ++GTI) {
       ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
@@ -474,7 +473,7 @@ protected:
        continue;
      }
-      APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits(AS));
+      APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
      Index *= APInt(Index.getBitWidth(),
                     TD.getTypeAllocSize(GTI.getIndexedType()));
      Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
@@ -2395,8 +2394,7 @@ private:
   Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
     assert(BeginOffset >= NewAllocaBeginOffset);
-    unsigned AS = cast<PointerType>(PointerTy)->getAddressSpace();
-    APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset);
+    APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
     return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
   }
@@ -2793,10 +2791,8 @@ private:
     const AllocaPartitioning::MemTransferOffsets &MTO
       = P.getMemTransferOffsets(II);
-    assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!");
-    unsigned AS = cast<PointerType>(OldPtr->getType())->getAddressSpace();
     // Compute the relative offset within the transfer.
-    unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+    unsigned IntPtrWidth = TD.getPointerSizeInBits();
     APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
                                                        : MTO.SourceBegin));

View File

@@ -806,8 +806,7 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                           const DataLayout *TD) {
   assert(V->getType()->isPointerTy() &&
          "getOrEnforceKnownAlignment expects a pointer!");
-  unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
-  unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 64;
+  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
   APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
   ComputeMaskedBits(V, KnownZero, KnownOne, TD);
   unsigned TrailZ = KnownZero.countTrailingOnes();