diff --git a/llvm/include/llvm/Target/TargetData.h b/llvm/include/llvm/Target/TargetData.h
index 026749f3e094..94ef0a24398c 100644
--- a/llvm/include/llvm/Target/TargetData.h
+++ b/llvm/include/llvm/Target/TargetData.h
@@ -155,26 +155,48 @@ public:
   /// Target pointer size, in bits
   unsigned char getPointerSizeInBits() const { return 8*PointerMemSize; }
 
-  /// getTypeSize - Return the number of bytes necessary to hold the specified
-  /// type.
-  uint64_t getTypeSize(const Type *Ty) const;
-
-  /// getABITypeSize - Return the number of bytes allocated for the specified
-  /// type when used as an element in a larger object, including alignment
-  /// padding.
-  uint64_t getABITypeSize(const Type *Ty) const {
-    unsigned char Align = getABITypeAlignment(Ty);
-    return (getTypeSize(Ty) + Align - 1)/Align*Align;
-  }
-
   /// getTypeSizeInBits - Return the number of bits necessary to hold the
-  /// specified type.
+  /// specified type.  For example, returns 36 for i36 and 80 for x86_fp80.
   uint64_t getTypeSizeInBits(const Type* Ty) const;
 
-  /// getABITypeSizeInBits - Return the number of bytes allocated for the
-  /// specified type when used as an element in a larger object, including
-  /// alignment padding.
-  uint64_t getABITypeSizeInBits(const Type* Ty) const;
+  /// getTypeStoreSize - Return the maximum number of bytes that may be
+  /// overwritten by storing the specified type.  For example, returns 5
+  /// for i36 and 10 for x86_fp80.
+  uint64_t getTypeStoreSize(const Type *Ty) const {
+    return (getTypeSizeInBits(Ty)+7)/8;
+  }
+
+  /// getTypeStoreSizeInBits - Return the maximum number of bits that may be
+  /// overwritten by storing the specified type; always a multiple of 8.  For
+  /// example, returns 40 for i36 and 80 for x86_fp80.
+  uint64_t getTypeStoreSizeInBits(const Type *Ty) const {
+    return 8*getTypeStoreSize(Ty);
+  }
+
+  /// getABITypeSize - Return the offset in bytes between successive objects
+  /// of the specified type, including alignment padding.  This is the amount
+  /// that alloca reserves for this type.  For example, returns 12 or 16 for
+  /// x86_fp80, depending on alignment.
+  uint64_t getABITypeSize(const Type* Ty) const {
+    unsigned char Align = getABITypeAlignment(Ty);
+    return (getTypeStoreSize(Ty) + Align - 1)/Align*Align;
+  }
+
+  /// getABITypeSizeInBits - Return the offset in bits between successive
+  /// objects of the specified type, including alignment padding; always a
+  /// multiple of 8.  This is the amount that alloca reserves for this type.
+  /// For example, returns 96 or 128 for x86_fp80, depending on alignment.
+  uint64_t getABITypeSizeInBits(const Type* Ty) const {
+    return 8*getABITypeSize(Ty);
+  }
+
+  /// getTypeSize - Obsolete method, do not use.  Replaced by getTypeStoreSize
+  /// and getABITypeSize.  For alias analysis of loads and stores you probably
+  /// want getTypeStoreSize.  Use getABITypeSize for GEP computations and alloca
+  /// sizing.
+  uint64_t getTypeSize(const Type *Ty) const {
+    return getTypeStoreSize(Ty);
+  }
 
   /// getABITypeAlignment - Return the minimum ABI-required alignment for the
   /// specified type.
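The size relationships documented in the header above are easy to sanity-check outside of LLVM. The standalone C++ sketch below mirrors the two formulas; the helper names are hypothetical stand-ins (not TargetData API), and the alignment values passed in are assumptions for illustration only.

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-ins for the TargetData queries above.
    uint64_t typeStoreSize(uint64_t sizeInBits) {
      return (sizeInBits + 7) / 8;                          // round bits up to whole bytes
    }
    uint64_t abiTypeSize(uint64_t sizeInBits, uint64_t abiAlign) {
      uint64_t store = typeStoreSize(sizeInBits);
      return (store + abiAlign - 1) / abiAlign * abiAlign;  // pad to the ABI alignment
    }

    int main() {
      // i36: 36 bits -> 5 stored bytes; with an assumed 8-byte ABI
      // alignment the allocated (ABI) size becomes 8 bytes.
      assert(typeStoreSize(36) == 5);
      std::cout << abiTypeSize(36, 8) << "\n";                           // 8

      // x86_fp80: 80 bits -> 10 stored bytes; 12 or 16 allocated bytes
      // depending on whether the target aligns it to 4 or 16.
      assert(typeStoreSize(80) == 10);
      std::cout << abiTypeSize(80, 4) << " " << abiTypeSize(80, 16) << "\n";  // 12 16
    }

The 12-versus-16 output reproduces the "depending on alignment" caveat in the getABITypeSize comment.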
@@ -238,6 +260,10 @@ public:
     return StructSize;
   }
 
+  uint64_t getSizeInBits() const {
+    return 8*StructSize;
+  }
+
   unsigned getAlignment() const {
     return StructAlignment;
   }
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index 5ae234240455..2a3ac5ae17c8 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -95,7 +95,7 @@ AliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
 
 AliasAnalysis::ModRefResult
 AliasAnalysis::getModRefInfo(LoadInst *L, Value *P, unsigned Size) {
-  return alias(L->getOperand(0), TD->getTypeSize(L->getType()),
+  return alias(L->getOperand(0), TD->getTypeStoreSize(L->getType()),
               P, Size) ? Ref : NoModRef;
 }
 
@@ -103,8 +103,8 @@ AliasAnalysis::ModRefResult
 AliasAnalysis::getModRefInfo(StoreInst *S, Value *P, unsigned Size) {
   // If the stored address cannot alias the pointer in question, then the
   // pointer cannot be modified by the store.
-  if (!alias(S->getOperand(1), TD->getTypeSize(S->getOperand(0)->getType()),
-             P, Size))
+  if (!alias(S->getOperand(1),
+             TD->getTypeStoreSize(S->getOperand(0)->getType()), P, Size))
     return NoModRef;
 
   // If the pointer is a pointer to constant memory, then it could not have been
diff --git a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index 30965c2fb3ef..e0457b1778ae 100644
--- a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -137,12 +137,12 @@ bool AAEval::runOnFunction(Function &F) {
        I1 != E; ++I1) {
     unsigned I1Size = 0;
     const Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
-    if (I1ElTy->isSized()) I1Size = TD.getTypeSize(I1ElTy);
+    if (I1ElTy->isSized()) I1Size = TD.getTypeStoreSize(I1ElTy);
 
     for (std::set<Value*>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
       unsigned I2Size = 0;
       const Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
-      if (I2ElTy->isSized()) I2Size = TD.getTypeSize(I2ElTy);
+      if (I2ElTy->isSized()) I2Size = TD.getTypeStoreSize(I2ElTy);
 
       switch (AA.alias(*I1, I1Size, *I2, I2Size)) {
       case AliasAnalysis::NoAlias:
@@ -169,7 +169,7 @@ bool AAEval::runOnFunction(Function &F) {
          V != Ve; ++V) {
       unsigned Size = 0;
       const Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
-      if (ElTy->isSized()) Size = TD.getTypeSize(ElTy);
+      if (ElTy->isSized()) Size = TD.getTypeStoreSize(ElTy);
 
       switch (AA.getModRefInfo(*C, *V, Size)) {
       case AliasAnalysis::NoModRef:
diff --git a/llvm/lib/Analysis/AliasSetTracker.cpp b/llvm/lib/Analysis/AliasSetTracker.cpp
index 366909c0ebeb..fcdd1b339996 100644
--- a/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -269,7 +269,7 @@ bool AliasSetTracker::add(Value *Ptr, unsigned Size) {
 bool AliasSetTracker::add(LoadInst *LI) {
   bool NewPtr;
   AliasSet &AS = addPointer(LI->getOperand(0),
-                            AA.getTargetData().getTypeSize(LI->getType()),
+                            AA.getTargetData().getTypeStoreSize(LI->getType()),
                             AliasSet::Refs, NewPtr);
   if (LI->isVolatile()) AS.setVolatile();
   return NewPtr;
@@ -279,7 +279,7 @@ bool AliasSetTracker::add(StoreInst *SI) {
   bool NewPtr;
   Value *Val = SI->getOperand(0);
   AliasSet &AS = addPointer(SI->getOperand(1),
-                            AA.getTargetData().getTypeSize(Val->getType()),
+                            AA.getTargetData().getTypeStoreSize(Val->getType()),
                             AliasSet::Mods, NewPtr);
   if (SI->isVolatile()) AS.setVolatile();
   return NewPtr;
@@ -395,7 +395,7 @@ bool AliasSetTracker::remove(Value *Ptr, unsigned Size) {
 }
 
 bool AliasSetTracker::remove(LoadInst *LI) {
-  unsigned Size = AA.getTargetData().getTypeSize(LI->getType());
+  unsigned Size = AA.getTargetData().getTypeStoreSize(LI->getType());
   AliasSet *AS = findAliasSetForPointer(LI->getOperand(0), Size);
   if (!AS) return false;
   remove(*AS);
@@ -403,7 +403,8 @@ bool AliasSetTracker::remove(LoadInst *LI) {
 }
 
 bool AliasSetTracker::remove(StoreInst *SI) {
-  unsigned Size = AA.getTargetData().getTypeSize(SI->getOperand(0)->getType());
+  unsigned Size =
+    AA.getTargetData().getTypeStoreSize(SI->getOperand(0)->getType());
   AliasSet *AS = findAliasSetForPointer(SI->getOperand(1), Size);
   if (!AS) return false;
   remove(*AS);
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 89b5d5c3cb90..6aeaec23faf9 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -364,7 +364,7 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
       // global/alloca/malloc, it cannot be accessing the global (it's
       // undefined to load or store bytes before or after an object).
       const Type *ElTy = cast<PointerType>(O1->getType())->getElementType();
-      unsigned GlobalSize = getTargetData().getTypeSize(ElTy);
+      unsigned GlobalSize = getTargetData().getABITypeSize(ElTy);
       if (GlobalSize < V2Size && V2Size != ~0U)
         return NoAlias;
     }
@@ -382,7 +382,7 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
       // global/alloca/malloc, it cannot be accessing the object (it's
       // undefined to load or store bytes before or after an object).
       const Type *ElTy = cast<PointerType>(O2->getType())->getElementType();
-      unsigned GlobalSize = getTargetData().getTypeSize(ElTy);
+      unsigned GlobalSize = getTargetData().getABITypeSize(ElTy);
       if (GlobalSize < V1Size && V1Size != ~0U)
         return NoAlias;
     }
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 599c7697a388..886dd9f4f762 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -74,7 +74,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
       Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
     } else {
       const SequentialType *SQT = cast<SequentialType>(*GTI);
-      Offset += TD.getTypeSize(SQT->getElementType())*CI->getSExtValue();
+      Offset += TD.getABITypeSize(SQT->getElementType())*CI->getSExtValue();
     }
   }
   return true;
diff --git a/llvm/lib/Analysis/LoadValueNumbering.cpp b/llvm/lib/Analysis/LoadValueNumbering.cpp
index f1ade951f34b..3af92bc11cb2 100644
--- a/llvm/lib/Analysis/LoadValueNumbering.cpp
+++ b/llvm/lib/Analysis/LoadValueNumbering.cpp
@@ -293,7 +293,7 @@ void LoadVN::getEqualNumberNodes(Value *V,
   Function *F = LoadBB->getParent();
 
   // Find out how many bytes of memory are loaded by the load instruction...
-  unsigned LoadSize = getAnalysis<TargetData>().getTypeSize(LI->getType());
+  unsigned LoadSize = getAnalysis<TargetData>().getTypeStoreSize(LI->getType());
   AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
 
   // Figure out if the load is invalidated from the entry of the block it is in
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 538a394d46d7..5375d52c33c6 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -78,20 +78,20 @@ Instruction* MemoryDependenceAnalysis::getCallSiteDependency(CallSite C,
     uint64_t pointerSize = 0;
     if (StoreInst* S = dyn_cast<StoreInst>(QI)) {
       pointer = S->getPointerOperand();
-      pointerSize = TD.getTypeSize(S->getOperand(0)->getType());
+      pointerSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
     } else if (LoadInst* L = dyn_cast<LoadInst>(QI)) {
       pointer = L->getPointerOperand();
-      pointerSize = TD.getTypeSize(L->getType());
+      pointerSize = TD.getTypeStoreSize(L->getType());
     } else if (AllocationInst* AI = dyn_cast<AllocationInst>(QI)) {
       pointer = AI;
       if (ConstantInt* C = dyn_cast<ConstantInt>(AI->getArraySize()))
         pointerSize = C->getZExtValue() * \
-                      TD.getTypeSize(AI->getAllocatedType());
+                      TD.getABITypeSize(AI->getAllocatedType());
       else
         pointerSize = ~0UL;
     } else if (VAArgInst* V = dyn_cast<VAArgInst>(QI)) {
       pointer = V->getOperand(0);
-      pointerSize = TD.getTypeSize(V->getType());
+      pointerSize = TD.getTypeStoreSize(V->getType());
     } else if (FreeInst* F = dyn_cast<FreeInst>(QI)) {
       pointer = F->getPointerOperand();
@@ -287,15 +287,15 @@ Instruction* MemoryDependenceAnalysis::getDependency(Instruction* query,
   bool queryIsVolatile = false;
   if (StoreInst* S = dyn_cast<StoreInst>(query)) {
     dependee = S->getPointerOperand();
-    dependeeSize = TD.getTypeSize(S->getOperand(0)->getType());
+    dependeeSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
     queryIsVolatile = S->isVolatile();
   } else if (LoadInst* L = dyn_cast<LoadInst>(query)) {
     dependee = L->getPointerOperand();
-    dependeeSize = TD.getTypeSize(L->getType());
+    dependeeSize = TD.getTypeStoreSize(L->getType());
     queryIsVolatile = L->isVolatile();
   } else if (VAArgInst* V = dyn_cast<VAArgInst>(query)) {
     dependee = V->getOperand(0);
-    dependeeSize = TD.getTypeSize(V->getType());
+    dependeeSize = TD.getTypeStoreSize(V->getType());
   } else if (FreeInst* F = dyn_cast<FreeInst>(query)) {
     dependee = F->getPointerOperand();
@@ -330,7 +330,7 @@ Instruction* MemoryDependenceAnalysis::getDependency(Instruction* query,
       }
 
       pointer = S->getPointerOperand();
-      pointerSize = TD.getTypeSize(S->getOperand(0)->getType());
+      pointerSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
     } else if (LoadInst* L = dyn_cast<LoadInst>(QI)) {
       // All volatile loads/stores depend on each other
       if (queryIsVolatile && L->isVolatile()) {
@@ -343,17 +343,17 @@ Instruction* MemoryDependenceAnalysis::getDependency(Instruction* query,
       }
 
       pointer = L->getPointerOperand();
-      pointerSize = TD.getTypeSize(L->getType());
+      pointerSize = TD.getTypeStoreSize(L->getType());
     } else if (AllocationInst* AI = dyn_cast<AllocationInst>(QI)) {
       pointer = AI;
       if (ConstantInt* C = dyn_cast<ConstantInt>(AI->getArraySize()))
         pointerSize = C->getZExtValue() * \
-                      TD.getTypeSize(AI->getAllocatedType());
+                      TD.getABITypeSize(AI->getAllocatedType());
       else
         pointerSize = ~0UL;
     } else if (VAArgInst* V = dyn_cast<VAArgInst>(QI)) {
       pointer = V->getOperand(0);
-      pointerSize = TD.getTypeSize(V->getType());
+      pointerSize = TD.getTypeStoreSize(V->getType());
     } else if (FreeInst* F = dyn_cast<FreeInst>(QI)) {
       pointer = F->getPointerOperand();
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 9150e96cc027..8b60d7c2ce11 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3524,7 +3524,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
     MVT::ValueType slotVT =
         (Node->getOpcode() == ISD::FP_EXTEND) ? oldVT : newVT;
     const Type *Ty = MVT::getTypeForValueType(slotVT);
-    uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+    uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
     unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
     MachineFunction &MF = DAG.getMachineFunction();
     int SSFI =
@@ -3618,7 +3618,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
       // slots and always reusing the same one.  We currently always create
       // new ones, as reuse may inhibit scheduling.
       const Type *Ty = MVT::getTypeForValueType(ExtraVT);
-      uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+      uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
      unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
       MachineFunction &MF = DAG.getMachineFunction();
       int SSFI =
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
index bb5379c34917..c98c1312fd63 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
@@ -482,7 +482,7 @@ void ScheduleDAG::AddOperand(MachineInstr *MI, SDOperand Op,
       Align = TM.getTargetData()->getPreferredTypeAlignmentShift(Type);
       if (Align == 0) {
         // Alignment of vector types.  FIXME!
-        Align = TM.getTargetData()->getTypeSize(Type);
+        Align = TM.getTargetData()->getABITypeSize(Type);
         Align = Log2_64(Align);
       }
     }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index b103e28b54e0..a5b161fbefa8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -261,7 +261,7 @@ FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
       if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
         const Type *Ty = AI->getAllocatedType();
-        uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
         unsigned Align =
           std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                    AI->getAlignment());
@@ -2335,7 +2335,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
     return;   // getValue will auto-populate this.
 
   const Type *Ty = I.getAllocatedType();
-  uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+  uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
   unsigned Align =
     std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
              I.getAlignment());
@@ -3546,7 +3546,7 @@ void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
         // Otherwise, create a stack slot and emit a store to it before the
         // asm.
         const Type *Ty = OpVal->getType();
-        uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
         unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
         MachineFunction &MF = DAG.getMachineFunction();
         int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
@@ -3804,7 +3804,7 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) {
     Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
 
   // Scale the source by the type size.
-  uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
+  uint64_t ElementSize = TD->getABITypeSize(I.getType()->getElementType());
   Src = DAG.getNode(ISD::MUL, Src.getValueType(),
                     Src, getIntPtrConstant(ElementSize));
@@ -3917,7 +3917,7 @@ TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
         const StructType *STy = cast<StructType>(Ty->getElementType());
         unsigned StructAlign =
           Log2_32(getTargetData()->getCallFrameTypeAlignment(STy));
-        unsigned StructSize  = getTargetData()->getTypeSize(STy);
+        unsigned StructSize  = getTargetData()->getABITypeSize(STy);
         Flags |= (StructAlign << ISD::ParamFlags::ByValAlignOffs);
         Flags |= (StructSize  << ISD::ParamFlags::ByValSizeOffs);
       }
@@ -4047,7 +4047,7 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
       const StructType *STy = cast<StructType>(Ty->getElementType());
       unsigned StructAlign =
         Log2_32(getTargetData()->getCallFrameTypeAlignment(STy));
-      unsigned StructSize  = getTargetData()->getTypeSize(STy);
+      unsigned StructSize  = getTargetData()->getABITypeSize(STy);
       Flags |= (StructAlign << ISD::ParamFlags::ByValAlignOffs);
       Flags |= (StructSize  << ISD::ParamFlags::ByValSizeOffs);
     }
diff --git a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index d89a9bb4ac7e..72db4e436034 100644
--- a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -735,7 +735,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
     return;
   } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
     unsigned ElementSize =
-      getTargetData()->getTypeSize(CP->getType()->getElementType());
+      getTargetData()->getABITypeSize(CP->getType()->getElementType());
     for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
       InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
     return;
@@ -744,7 +744,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
     StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
     return;
   } else if (isa<ConstantAggregateZero>(Init)) {
-    memset(Addr, 0, (size_t)getTargetData()->getTypeSize(Init->getType()));
+    memset(Addr, 0, (size_t)getTargetData()->getABITypeSize(Init->getType()));
     return;
   }
 
@@ -752,7 +752,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
   case Type::ArrayTyID: {
     const ConstantArray *CPA = cast<ConstantArray>(Init);
     unsigned ElementSize =
-      getTargetData()->getTypeSize(CPA->getType()->getElementType());
+      getTargetData()->getABITypeSize(CPA->getType()->getElementType());
     for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
       InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
     return;
@@ -843,7 +843,7 @@ void ExecutionEngine::emitGlobals() {
           const Type *Ty = I->getType()->getElementType();
 
           // Allocate some memory for it!
-          unsigned Size = TD->getTypeSize(Ty);
+          unsigned Size = TD->getABITypeSize(Ty);
           addGlobalMapping(I, new char[Size]);
         } else {
           // External variable reference. Try to use the dynamic loader to
@@ -897,7 +897,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
   DOUT << "Global '" << GV->getName() << "' -> " << GA << "\n";
 
   const Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)getTargetData()->getTypeSize(ElTy);
+  size_t GVSize = (size_t)getTargetData()->getABITypeSize(ElTy);
   if (GA == 0) {
     // If it's not already specified, allocate memory for the global.
    GA = new char[GVSize];
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index f11cf816b22b..6ab123125f27 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -746,7 +746,7 @@ void Interpreter::visitAllocationInst(AllocationInst &I) {
   unsigned NumElements =
     getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
 
-  unsigned TypeSize = (size_t)TD.getTypeSize(Ty);
+  unsigned TypeSize = (size_t)TD.getABITypeSize(Ty);
 
   // Avoid malloc-ing zero bytes, use max()...
   unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
@@ -806,7 +806,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
         Idx = (int64_t)IdxGV.IntVal.getZExtValue();
       else
         assert(0 && "Invalid index type for getelementptr");
-      Total += TD.getTypeSize(ST->getElementType())*Idx;
+      Total += TD.getABITypeSize(ST->getElementType())*Idx;
     }
   }
diff --git a/llvm/lib/ExecutionEngine/JIT/JIT.cpp b/llvm/lib/ExecutionEngine/JIT/JIT.cpp
index 640520f313c4..5b04124cd055 100644
--- a/llvm/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/llvm/lib/ExecutionEngine/JIT/JIT.cpp
@@ -337,7 +337,7 @@ void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) {
     // actually initialize the global after current function has finished
    // compilation.
     const Type *GlobalType = GV->getType()->getElementType();
-    size_t S = getTargetData()->getTypeSize(GlobalType);
+    size_t S = getTargetData()->getABITypeSize(GlobalType);
     size_t A = getTargetData()->getPrefTypeAlignment(GlobalType);
     if (A <= 8) {
       Ptr = malloc(S);
diff --git a/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp b/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
index b7af521dd971..eab322cce422 100644
--- a/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -899,7 +899,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
   unsigned Size = CPE.Offset;
   const Type *Ty = CPE.isMachineConstantPoolEntry()
     ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
-  Size += TheJIT->getTargetData()->getTypeSize(Ty);
+  Size += TheJIT->getTargetData()->getABITypeSize(Ty);
 
   ConstantPoolBase = allocateSpace(Size, 1 << MCP->getConstantPoolAlignment());
   ConstantPool = MCP;
diff --git a/llvm/lib/Target/TargetData.cpp b/llvm/lib/Target/TargetData.cpp
index b1b78a8adad3..5a189205ed93 100644
--- a/llvm/lib/Target/TargetData.cpp
+++ b/llvm/lib/Target/TargetData.cpp
@@ -49,14 +49,13 @@ StructLayout::StructLayout(const StructType *ST, const TargetData &TD) {
   // Loop over each of the elements, placing them in memory...
   for (unsigned i = 0, e = NumElements; i != e; ++i) {
     const Type *Ty = ST->getElementType(i);
-    unsigned TyAlign;
-    uint64_t TySize;
-    TyAlign = (ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty));
-    TySize = TD.getTypeSize(Ty);
+    unsigned TyAlign = ST->isPacked() ?
+      1 : TD.getABITypeAlignment(Ty);
+    uint64_t TySize = ST->isPacked() ?
+      TD.getTypeStoreSize(Ty) : TD.getABITypeSize(Ty);
 
-    // Add padding if necessary to make the data element aligned properly...
-    if (StructSize % TyAlign != 0)
-      StructSize = (StructSize/TyAlign + 1) * TyAlign;   // Add padding...
+    // Add padding if necessary to align the data element properly...
+    StructSize = (StructSize + TyAlign - 1)/TyAlign * TyAlign;
 
     // Keep track of maximum alignment constraint
     StructAlignment = std::max(TyAlign, StructAlignment);
@@ -406,83 +405,47 @@ std::string TargetData::getStringRepresentation() const {
 }
 
-uint64_t TargetData::getTypeSize(const Type *Ty) const {
+uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
   assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
   switch (Ty->getTypeID()) {
   case Type::LabelTyID:
   case Type::PointerTyID:
-    return getPointerSize();
+    return getPointerSizeInBits();
   case Type::ArrayTyID: {
     const ArrayType *ATy = cast<ArrayType>(Ty);
-    uint64_t Size;
-    unsigned char Alignment;
-    Size = getTypeSize(ATy->getElementType());
-    Alignment = getABITypeAlignment(ATy->getElementType());
-    uint64_t AlignedSize = (Size + Alignment - 1)/Alignment*Alignment;
-    return AlignedSize*ATy->getNumElements();
+    return getABITypeSizeInBits(ATy->getElementType())*ATy->getNumElements();
   }
   case Type::StructTyID: {
     // Get the layout annotation... which is lazily created on demand.
     const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
-    return Layout->getSizeInBytes();
-  }
-  case Type::IntegerTyID: {
-    unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
-    if (BitWidth <= 8) {
-      return 1;
-    } else if (BitWidth <= 16) {
-      return 2;
-    } else if (BitWidth <= 32) {
-      return 4;
-    } else if (BitWidth <= 64) {
-      return 8;
-    } else {
-      // The size of this > 64 bit type is chosen as a multiple of the
-      // preferred alignment of the largest "native" size the target supports.
-      // We first obtain the the alignment info for this type and then compute
-      // the next largest multiple of that size.
-      uint64_t size = getAlignmentInfo(INTEGER_ALIGN, BitWidth, false) * 8;
-      return (((BitWidth / (size)) + (BitWidth % size != 0)) * size) / 8;
-    }
-    break;
+    return Layout->getSizeInBits();
   }
+  case Type::IntegerTyID:
+    return cast<IntegerType>(Ty)->getBitWidth();
   case Type::VoidTyID:
-    return 1;
-  case Type::FloatTyID:
-    return 4;
-  case Type::DoubleTyID:
     return 8;
+  case Type::FloatTyID:
+    return 32;
+  case Type::DoubleTyID:
+    return 64;
   case Type::PPC_FP128TyID:
   case Type::FP128TyID:
-    return 16;
+    return 128;
   // In memory objects this is always aligned to a higher boundary, but
-  // only 10 bytes contain information.
+  // only 80 bits contain information.
   case Type::X86_FP80TyID:
-    return 10;
+    return 80;
   case Type::VectorTyID: {
     const VectorType *PTy = cast<VectorType>(Ty);
-    return PTy->getBitWidth() / 8;
+    return PTy->getBitWidth();
   }
   default:
-    assert(0 && "TargetData::getTypeSize(): Unsupported type");
+    assert(0 && "TargetData::getTypeSizeInBits(): Unsupported type");
     break;
  }
   return 0;
 }
 
-uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
-  if (Ty->isInteger())
-    return cast<IntegerType>(Ty)->getBitWidth();
-  else
-    return getTypeSize(Ty) * 8;
-}
-
-uint64_t TargetData::getABITypeSizeInBits(const Type *Ty) const {
-  if (Ty->isInteger())
-    return cast<IntegerType>(Ty)->getBitWidth();
-  else
-    return getABITypeSize(Ty) * 8;
-}
-
 /*!
   \param abi_or_pref Flag that determines which alignment is returned. true
   returns the ABI alignment, false returns the preferred alignment.
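The StructLayout hunk above also swaps a conditional padding step for a branchless round-up. The two forms are equivalent whenever the alignment is nonzero, which TargetData guarantees; the sketch below is a standalone check of that equivalence, not LLVM code.

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // Old form: pad only when the running size is misaligned.
    uint64_t padOld(uint64_t size, uint64_t align) {
      if (size % align != 0)
        size = (size / align + 1) * align;
      return size;
    }

    // New form: branchless round-up to the next multiple of align.
    uint64_t padNew(uint64_t size, uint64_t align) {
      return (size + align - 1) / align * align;
    }

    int main() {
      // The two forms agree for every size, including already-aligned ones.
      for (uint64_t align : {1, 2, 4, 8, 16})
        for (uint64_t size = 0; size < 100; ++size)
          assert(padOld(size, align) == padNew(size, align));
    }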
@@ -542,7 +505,7 @@ unsigned char TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
     break;
   }
 
-  return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSize(Ty) * 8,
+  return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty),
                           abi_or_pref);
 }
 
@@ -603,7 +566,7 @@ uint64_t TargetData::getIndexedOffset(const Type *ptrTy, Value* const* Indices,
 
       // Get the array index and the size of each array element.
       int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue();
-      Result += arrayIdx * (int64_t)getTypeSize(Ty);
+      Result += arrayIdx * (int64_t)getABITypeSize(Ty);
     }
   }
 
@@ -623,7 +586,7 @@ unsigned TargetData::getPreferredAlignmentLog(const GlobalVariable *GV) const {
     if (Alignment < 4) {
       // If the global is not external, see if it is large.  If so, give it a
       // larger alignment.
-      if (getTypeSize(ElemType) > 128)
+      if (getTypeSizeInBits(ElemType) > 128)
         Alignment = 4;    // 16-byte alignment.
     }
   }
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 85b29f871fc3..7479c8ee6740 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -277,7 +277,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg) const {
     const PointerType *LoadTy =
       cast<PointerType>(Load->getOperand(0)->getType());
-    unsigned LoadSize = (unsigned)TD.getTypeSize(LoadTy->getElementType());
+    unsigned LoadSize = (unsigned)TD.getTypeStoreSize(LoadTy->getElementType());
 
     if (AA.canInstructionRangeModify(BB->front(), *Load, Arg, LoadSize))
       return false;  // Pointer is invalidated!
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 5d8f969f3359..779b4a1871c5 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1227,7 +1227,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
       // (2048 bytes currently), as we don't want to introduce a 16M global or
       // something.
       if (NElements->getZExtValue()*
-          TD.getTypeSize(MI->getAllocatedType()) < 2048) {
+          TD.getABITypeSize(MI->getAllocatedType()) < 2048) {
         GVI = OptimizeGlobalAddressOfMalloc(GV, MI);
         return true;
       }
diff --git a/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index c2a078723641..0dad42f8e21d 100644
--- a/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -634,7 +634,7 @@ static bool FindMaximalLegalAddressingMode(Value *Addr, const Type *AccessTy,
           cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
         ConstantOffset += SL->getElementOffset(Idx);
       } else {
-        uint64_t TypeSize = TD->getTypeSize(GTI.getIndexedType());
+        uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
         if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
           ConstantOffset += CI->getSExtValue()*TypeSize;
         } else if (TypeSize) {  // Scales of zero don't do anything.
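The pattern running through these hunks is consistent: anything that computes an address stride (GEP offsets, alloca and malloc sizing, addressing modes) now scales by the padded ABI size, while anything reasoning about which bytes a single load or store touches uses the store size. A hedged sketch of the distinction, reusing the hypothetical helpers from the earlier example (the 4-byte alignment for x86_fp80 is again an assumption):

    #include <cstdint>
    #include <iostream>

    uint64_t typeStoreSize(uint64_t bits) { return (bits + 7) / 8; }
    uint64_t abiTypeSize(uint64_t bits, uint64_t align) {
      return (typeStoreSize(bits) + align - 1) / align * align;
    }

    int main() {
      // Element stride inside [N x x86_fp80] (assumed 4-byte aligned):
      // GEP-style indexing must use the padded 12-byte ABI size...
      uint64_t stride = abiTypeSize(80, 4);
      std::cout << "&A[3] - &A[0] = " << 3 * stride << " bytes\n";   // 36

      // ...while an alias query about one element only needs the 10
      // bytes that a store of x86_fp80 can actually overwrite.
      std::cout << "bytes written = " << typeStoreSize(80) << "\n";  // 10
    }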
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 2e1d9ade0a72..e5c557c349ff 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -137,8 +137,8 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
         dep != MemoryDependenceAnalysis::NonLocal &&
         isa<StoreInst>(dep)) {
       if (dep != last ||
-          TD.getTypeSize(last->getOperand(0)->getType()) >
-          TD.getTypeSize(BBI->getOperand(0)->getType())) {
+          TD.getTypeStoreSize(last->getOperand(0)->getType()) >
+          TD.getTypeStoreSize(BBI->getOperand(0)->getType())) {
         dep = MD.getDependency(BBI, dep);
         continue;
       }
@@ -210,7 +210,7 @@ bool DSE::handleFreeWithNonTrivialDependency(FreeInst* F, Instruction* dep,
   Value* depPointer = dependency->getPointerOperand();
   const Type* depType = dependency->getOperand(0)->getType();
-  unsigned depPointerSize = TD.getTypeSize(depType);
+  unsigned depPointerSize = TD.getTypeStoreSize(depType);
 
   // Check for aliasing
   AliasAnalysis::AliasResult A = AA.alias(F->getPointerOperand(), ~0UL,
@@ -329,7 +329,7 @@ bool DSE::handleEndBlock(BasicBlock& BB,
         unsigned pointerSize = ~0UL;
         if (ConstantInt* C = dyn_cast<ConstantInt>((*I)->getArraySize()))
           pointerSize = C->getZExtValue() * \
-                        TD.getTypeSize((*I)->getAllocatedType());
+                        TD.getABITypeSize((*I)->getAllocatedType());
 
         // See if the call site touches it
         AliasAnalysis::ModRefResult A = AA.getModRefInfo(CS, *I, pointerSize);
@@ -394,7 +394,7 @@ bool DSE::RemoveUndeadPointers(Value* killPointer,
       unsigned pointerSize = ~0UL;
      if (ConstantInt* C = dyn_cast<ConstantInt>((*I)->getArraySize()))
         pointerSize = C->getZExtValue() * \
-                      TD.getTypeSize((*I)->getAllocatedType());
+                      TD.getABITypeSize((*I)->getAllocatedType());
 
       // See if this pointer could alias it
       AliasAnalysis::AliasResult A = AA.alias(*I, pointerSize,
diff --git a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
index 12523cd3fe4f..6ebf42a96d82 100644
--- a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -4438,7 +4438,7 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
   for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
     Value *Op = GEP->getOperand(i);
-    uint64_t Size = TD.getTypeSize(GTI.getIndexedType()) & PtrSizeMask;
+    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType()) & PtrSizeMask;
     if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
       if (OpC->isZero()) continue;
@@ -4523,7 +4523,7 @@ Instruction *InstCombiner::FoldGEPICmp(User *GEPLHS, Value *RHS,
           return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
         if (C->isNullValue())
           EmitIt = false;
-        else if (TD->getTypeSize(GTI.getIndexedType()) == 0) {
+        else if (TD->getABITypeSize(GTI.getIndexedType()) == 0) {
           EmitIt = false;  // This is indexing into a zero sized array?
         } else if (isa<ConstantInt>(C))
           return ReplaceInstUsesWith(I, // No comparison is needed here.
@@ -6305,8 +6305,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
   // same, we open the door to infinite loops of various kinds.
   if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;
 
-  uint64_t AllocElTySize = TD->getTypeSize(AllocElTy);
-  uint64_t CastElTySize = TD->getTypeSize(CastElTy);
+  uint64_t AllocElTySize = TD->getABITypeSize(AllocElTy);
+  uint64_t CastElTySize = TD->getABITypeSize(CastElTy);
   if (CastElTySize == 0 || AllocElTySize == 0) return 0;
 
   // See if we can satisfy the modulus by pulling a scale out of the array
@@ -6573,7 +6573,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
       // is something like [0 x {int, int}]
       const Type *IntPtrTy = TD->getIntPtrType();
       int64_t FirstIdx = 0;
-      if (int64_t TySize = TD->getTypeSize(GEPIdxTy)) {
+      if (int64_t TySize = TD->getABITypeSize(GEPIdxTy)) {
         FirstIdx = Offset/TySize;
         Offset %= TySize;
@@ -6605,7 +6605,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
           }
         } else if (isa<ArrayType>(GEPIdxTy) || isa<VectorType>(GEPIdxTy)) {
           const SequentialType *STy = cast<SequentialType>(GEPIdxTy);
-          if (uint64_t EltSize = TD->getTypeSize(STy->getElementType())) {
+          if (uint64_t EltSize = TD->getABITypeSize(STy->getElementType())){
            NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
             Offset %= EltSize;
           } else {
@@ -8644,7 +8644,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       // insert it.  This explicit cast can make subsequent optimizations more
       // obvious.
       Value *Op = GEP.getOperand(i);
-      if (TD->getTypeSize(Op->getType()) > TD->getPointerSize())
+      if (TD->getTypeSizeInBits(Op->getType()) > TD->getPointerSizeInBits())
         if (Constant *C = dyn_cast<Constant>(Op)) {
           GEP.setOperand(i, ConstantExpr::getTrunc(C, TD->getIntPtrType()));
           MadeChange = true;
@@ -8724,12 +8724,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       } else if (Constant *GO1C = dyn_cast<Constant>(GO1)) {
         GO1 = ConstantExpr::getIntegerCast(GO1C, SO1->getType(), true);
       } else {
-        unsigned PS = TD->getPointerSize();
-        if (TD->getTypeSize(SO1->getType()) == PS) {
+        unsigned PS = TD->getPointerSizeInBits();
+        if (TD->getTypeSizeInBits(SO1->getType()) == PS) {
           // Convert GO1 to SO1's type.
           GO1 = InsertCastToIntPtrTy(GO1, SO1->getType(), &GEP, this);
 
-        } else if (TD->getTypeSize(GO1->getType()) == PS) {
+        } else if (TD->getTypeSizeInBits(GO1->getType()) == PS) {
           // Convert SO1 to GO1's type.
           SO1 = InsertCastToIntPtrTy(SO1, GO1->getType(), &GEP, this);
         } else {
@@ -8818,8 +8818,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
     const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
     const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
     if (isa<ArrayType>(SrcElTy) &&
-        TD->getTypeSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
-        TD->getTypeSize(ResElTy)) {
+        TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
+        TD->getABITypeSize(ResElTy)) {
       Value *Idx[2];
       Idx[0] = Constant::getNullValue(Type::Int32Ty);
       Idx[1] = GEP.getOperand(1);
@@ -8837,7 +8837,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
     if (isa<ArrayType>(SrcElTy) &&
        (ResElTy == Type::Int8Ty || ResElTy == Type::Int8Ty)) {
       uint64_t ArrayEltSize =
-        TD->getTypeSize(cast<ArrayType>(SrcElTy)->getElementType());
+        TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType());
 
       // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
       // allow either a mul, shift, or constant here.
@@ -8938,7 +8938,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
   // Note that we only do this for alloca's, because malloc should allocate and
   // return a unique pointer, even for a zero byte allocation.
   if (isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized() &&
-      TD->getTypeSize(AI.getAllocatedType()) == 0)
+      TD->getABITypeSize(AI.getAllocatedType()) == 0)
     return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
 
   return 0;
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 5311a7f04f31..08c0a8885152 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -366,7 +366,7 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
     // Don't hoist loads which have may-aliased stores in loop.
     unsigned Size = 0;
     if (LI->getType()->isSized())
-      Size = AA->getTargetData().getTypeSize(LI->getType());
+      Size = AA->getTargetData().getTypeStoreSize(LI->getType());
     return !pointerInvalidatedByLoop(LI->getOperand(0), Size);
   } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
     // Handle obvious cases efficiently.
diff --git a/llvm/lib/Transforms/Scalar/PredicateSimplifier.cpp b/llvm/lib/Transforms/Scalar/PredicateSimplifier.cpp
index 3723bcbb0a6d..e84f096fbba1 100644
--- a/llvm/lib/Transforms/Scalar/PredicateSimplifier.cpp
+++ b/llvm/lib/Transforms/Scalar/PredicateSimplifier.cpp
@@ -1120,11 +1120,8 @@ namespace {
     uint32_t typeToWidth(const Type *Ty) const {
       if (TD)
         return TD->getTypeSizeInBits(Ty);
-
-      if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty))
-        return ITy->getBitWidth();
-
-      return 0;
+      else
+        return Ty->getPrimitiveSizeInBits();
     }
 
     static bool isRelatedBy(const ConstantRange &CR1, const ConstantRange &CR2,
diff --git a/llvm/lib/Transforms/Utils/LowerAllocations.cpp b/llvm/lib/Transforms/Utils/LowerAllocations.cpp
index edc4c8a96f89..b089cd6d8b4f 100644
--- a/llvm/lib/Transforms/Utils/LowerAllocations.cpp
+++ b/llvm/lib/Transforms/Utils/LowerAllocations.cpp
@@ -116,7 +116,7 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
       // malloc(type) becomes sbyte *malloc(size)
       Value *MallocArg;
       if (LowerMallocArgToInteger)
-        MallocArg = ConstantInt::get(Type::Int64Ty, TD.getTypeSize(AllocTy));
+        MallocArg = ConstantInt::get(Type::Int64Ty, TD.getABITypeSize(AllocTy));
       else
         MallocArg = ConstantExpr::getSizeOf(AllocTy);
       MallocArg = ConstantExpr::getTruncOrBitCast(cast<Constant>(MallocArg),
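The LowerAllocations change above is the canonical consumer of getABITypeSize: the byte count handed to malloc must cover the full padded element stride, not just the stored bits, or adjacent array elements would overlap. A minimal sketch of that sizing arithmetic, under the same hypothetical helpers and assumed alignments as the earlier examples:

    #include <cstdint>
    #include <iostream>

    uint64_t typeStoreSize(uint64_t bits) { return (bits + 7) / 8; }
    uint64_t abiTypeSize(uint64_t bits, uint64_t align) {
      return (typeStoreSize(bits) + align - 1) / align * align;
    }

    int main() {
      // malloc([100 x i36]) must reserve the padded 8-byte stride per
      // element (assuming 8-byte alignment), not the 5 stored bytes.
      uint64_t mallocArg = 100 * abiTypeSize(36, 8);
      std::cout << mallocArg << " bytes\n";   // 800, not 100*5 = 500
    }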