Extend the verifier to validate range metadata on calls and invokes.

Range metadata applies to loads, calls, and invokes. We were validating that metadata applied to loads was correct according to the LangRef, but we were not validating metadata applied to calls or invokes. This change extracts the checking functionality to a common location, reuses it for all valid locations, and adds a simple test to ensure a misused range on a call gets reported.

llvm-svn: 220246
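
For illustration only (not part of this commit), a minimal sketch of well-formed range metadata on a call after this change, written in the same era's metadata syntax as the test below; the function names and the [0, 2) bounds are made up:

declare i8 @g()

define i8 @use_range() {
entry:
  ; The verifier now checks this !range node the same way it checks one on a
  ; load: pairs of integer bounds whose type matches the call's result type
  ; (i8 here), with non-empty, non-overlapping, ordered intervals.
  %v = call i8 @g(), !range !0
  ret i8 %v
}

!0 = metadata !{i8 0, i8 2}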
Philip Reames 2014-10-20 23:52:07 +00:00
parent 84801c217c
commit bf9676f7f0
2 changed files with 68 additions and 52 deletions


@@ -269,6 +269,8 @@ private:
                       SmallVectorImpl<const MDNode *> &Requirements);
  void visitFunction(const Function &F);
  void visitBasicBlock(BasicBlock &BB);
  void visitRangeMetadata(Instruction& I, MDNode* Range, Type* Ty);
  // InstVisitor overrides...
  using InstVisitor<Verifier>::visit;
@@ -1885,34 +1887,12 @@ static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
  return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
}
void Verifier::visitLoadInst(LoadInst &LI) {
  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
  Assert1(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = PTy->getElementType();
  Assert2(ElTy == LI.getType(),
          "Load result type does not match pointer operand type!", &LI, ElTy);
  Assert1(LI.getAlignment() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  if (LI.isAtomic()) {
    Assert1(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
            "Load cannot have Release ordering", &LI);
    Assert1(LI.getAlignment() != 0,
            "Atomic load must specify explicit alignment", &LI);
    if (!ElTy->isPointerTy()) {
      Assert2(ElTy->isIntegerTy(),
              "atomic load operand must have integer type!",
              &LI, ElTy);
      unsigned Size = ElTy->getPrimitiveSizeInBits();
      Assert2(Size >= 8 && !(Size & (Size - 1)),
              "atomic load operand must be power-of-two byte-sized integer",
              &LI, ElTy);
    }
  } else {
    Assert1(LI.getSynchScope() == CrossThread,
            "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }
void Verifier::visitRangeMetadata(Instruction& I,
                                  MDNode* Range, Type* Ty) {
  assert(Range &&
         Range == I.getMetadata(LLVMContext::MD_range) &&
         "precondition violation");
  if (MDNode *Range = LI.getMetadata(LLVMContext::MD_range)) {
  unsigned NumOperands = Range->getNumOperands();
  Assert1(NumOperands % 2 == 0, "Unfinished range!", Range);
  unsigned NumRanges = NumOperands / 2;
@@ -1925,8 +1905,8 @@ void Verifier::visitLoadInst(LoadInst &LI) {
    ConstantInt *High = dyn_cast<ConstantInt>(Range->getOperand(2*i + 1));
    Assert1(High, "The upper limit must be an integer!", High);
    Assert1(High->getType() == Low->getType() &&
            High->getType() == ElTy, "Range types must match load type!",
            &LI);
            High->getType() == Ty, "Range types must match instruction type!",
            &I);
    APInt HighV = High->getValue();
    APInt LowV = Low->getValue();
@@ -1954,8 +1934,33 @@ void Verifier::visitLoadInst(LoadInst &LI) {
    Assert1(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
            Range);
  }
}
void Verifier::visitLoadInst(LoadInst &LI) {
  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
  Assert1(PTy, "Load operand must be a pointer.", &LI);
  Type *ElTy = PTy->getElementType();
  Assert2(ElTy == LI.getType(),
          "Load result type does not match pointer operand type!", &LI, ElTy);
  Assert1(LI.getAlignment() <= Value::MaximumAlignment,
          "huge alignment values are unsupported", &LI);
  if (LI.isAtomic()) {
    Assert1(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
            "Load cannot have Release ordering", &LI);
    Assert1(LI.getAlignment() != 0,
            "Atomic load must specify explicit alignment", &LI);
    if (!ElTy->isPointerTy()) {
      Assert2(ElTy->isIntegerTy(),
              "atomic load operand must have integer type!",
              &LI, ElTy);
      unsigned Size = ElTy->getPrimitiveSizeInBits();
      Assert2(Size >= 8 && !(Size & (Size - 1)),
              "atomic load operand must be power-of-two byte-sized integer",
              &LI, ElTy);
    }
  } else {
    Assert1(LI.getSynchScope() == CrossThread,
            "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }
  visitInstruction(LI);
@@ -2276,9 +2281,11 @@ void Verifier::visitInstruction(Instruction &I) {
    }
  }
  MDNode *MD = I.getMetadata(LLVMContext::MD_range);
  Assert1(!MD || isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
    Assert1(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
            "Ranges are only for loads, calls and invokes!", &I);
    visitRangeMetadata(I, Range, I.getType());
  }
  InstsInThisBlock.insert(&I);
}


@@ -48,7 +48,7 @@ entry:
ret i8 %y
}
!5 = metadata !{i32 0, i8 0}
; CHECK: Range types must match load type!
; CHECK: Range types must match instruction type!
; CHECK: %y = load
define i8 @f7(i8* %x) {
@@ -57,7 +57,7 @@ entry:
ret i8 %y
}
!6 = metadata !{i8 0, i32 0}
; CHECK: Range types must match load type!
; CHECK: Range types must match instruction type!
; CHECK: %y = load
define i8 @f8(i8* %x) {
@@ -66,7 +66,7 @@ entry:
ret i8 %y
}
!7 = metadata !{i32 0, i32 0}
; CHECK: Range types must match load type!
; CHECK: Range types must match instruction type!
; CHECK: %y = load
define i8 @f9(i8* %x) {
@@ -140,3 +140,12 @@ entry:
}
!17 = metadata !{i8 1, i8 3, i8 4, i8 5, i8 6, i8 1}
; CHECK: Intervals are contiguous
define i8 @f18() {
entry:
%y = call i8 undef(), !range !18
ret i8 %y
}
!18 = metadata !{}
; CHECK: It should have at least one range!
; CHECK-NEXT: metadata