ScalarEvolution assume hanging bugfix

http://reviews.llvm.org/D12719

llvm-svn: 247184
Piotr Padlewski 2015-09-09 20:47:30 +00:00
parent 5ae4a85e3f
commit 0dde00d239
2 changed files with 152 additions and 13 deletions


@@ -6958,18 +6958,6 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
return true;
// Check conditions due to any @llvm.assume intrinsics.
for (auto &AssumeVH : AC.assumptions()) {
if (!AssumeVH)
continue;
auto *CI = cast<CallInst>(AssumeVH);
if (!DT.dominates(CI, Latch->getTerminator()))
continue;
if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
return true;
}
struct ClearWalkingBEDominatingCondsOnExit {
ScalarEvolution &SE;
@@ -6981,7 +6969,7 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
}
};
// We don't want more than one activation of the following loop on the stack
// We don't want more than one activation of the following loops on the stack
// -- that can lead to O(n!) time complexity.
if (WalkingBEDominatingConds)
return false;
@@ -6989,6 +6977,18 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
WalkingBEDominatingConds = true;
ClearWalkingBEDominatingCondsOnExit ClearOnExit(*this);
// Check conditions due to any @llvm.assume intrinsics.
for (auto &AssumeVH : AC.assumptions()) {
if (!AssumeVH)
continue;
auto *CI = cast<CallInst>(AssumeVH);
if (!DT.dominates(CI, Latch->getTerminator()))
continue;
if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
return true;
}
// If the loop is not reachable from the entry block, we risk running into an
// infinite loop as we walk up into the dom tree. These loops do not matter
// anyway, so we just return a conservative answer when we see them.
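The reordering above is the substance of the fix: the @llvm.assume scan now runs only after WalkingBEDominatingConds has been set, so a re-entrant call into this query bails out immediately instead of rescanning every assumption and recursing again. Below is a minimal, self-contained C++ sketch of that reentrancy-guard pattern; the names (Analysis, Fact, isGuarded, impliesViaAssume, impliesRecursively) are invented for illustration and are not the real ScalarEvolution interface.

// A minimal sketch of the reentrancy-guard pattern (illustrative names only).
#include <vector>

struct Fact { int Value; };

class Analysis {
  // Set while one activation of the expensive walk is already on the stack.
  bool WalkingGuardedConds = false;
  std::vector<Fact> Assumptions;

  // RAII helper in the spirit of ClearWalkingBEDominatingCondsOnExit: resets
  // the flag on every return path of the enclosing query.
  struct ClearOnExit {
    Analysis &A;
    explicit ClearOnExit(Analysis &Arg) : A(Arg) {}
    ~ClearOnExit() { A.WalkingGuardedConds = false; }
  };

  // Stand-ins for the implication checks; in the real code these can end up
  // re-entering the guarded query.
  bool impliesViaAssume(const Fact &F) { (void)F; return false; }
  bool impliesRecursively() { return false; }

public:
  bool isGuarded() {
    // Refuse to start a second activation of the expensive walks; without
    // this early exit, every re-entry would rescan all assumptions.
    if (WalkingGuardedConds)
      return false;

    WalkingGuardedConds = true;
    ClearOnExit Clear(*this);

    // The assume scan sits *behind* the guard -- the point of this commit.
    for (const Fact &F : Assumptions)
      if (impliesViaAssume(F))
        return true;

    return impliesRecursively();
  }
};

The RAII clearer guarantees the flag is reset no matter which return path the query takes, matching how ClearWalkingBEDominatingCondsOnExit is used in the patch.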


@@ -0,0 +1,139 @@
; RUN: opt %s -always-inline | opt -analyze -scalar-evolution
; There was an optimization bug in ScalarEvolution that caused excessively long
; compile time and a stack-overflow crash on inputs like this one: once the
; alwaysinline helpers below are expanded, every loop iteration carries a few
; thousand @llvm.assume calls (see the count sketch after this test).
declare void @body(i32)
declare void @llvm.assume(i1)
define available_externally void @assume1(i64 %i.ext, i64 %a) alwaysinline {
%cmp0 = icmp ne i64 %i.ext, %a
call void @llvm.assume(i1 %cmp0)
%a1 = add i64 %a, 1
%cmp1 = icmp ne i64 %i.ext, %a1
call void @llvm.assume(i1 %cmp1)
%a2 = add i64 %a1, 1
%cmp2 = icmp ne i64 %i.ext, %a2
call void @llvm.assume(i1 %cmp2)
%a3 = add i64 %a2, 1
%cmp3 = icmp ne i64 %i.ext, %a3
call void @llvm.assume(i1 %cmp3)
%a4 = add i64 %a3, 1
%cmp4 = icmp ne i64 %i.ext, %a4
call void @llvm.assume(i1 %cmp4)
ret void
}
define available_externally void @assume2(i64 %i.ext, i64 %a) alwaysinline {
call void @assume1(i64 %i.ext, i64 %a)
%a1 = add i64 %a, 5
%cmp1 = icmp ne i64 %i.ext, %a1
call void @assume1(i64 %i.ext, i64 %a1)
%a2 = add i64 %a1, 5
%cmp2 = icmp ne i64 %i.ext, %a2
call void @assume1(i64 %i.ext, i64 %a2)
%a3 = add i64 %a2, 5
%cmp3 = icmp ne i64 %i.ext, %a3
call void @assume1(i64 %i.ext, i64 %a3)
%a4 = add i64 %a3, 5
%cmp4 = icmp ne i64 %i.ext, %a4
call void @assume1(i64 %i.ext, i64 %a4)
ret void
}
define available_externally void @assume3(i64 %i.ext, i64 %a) alwaysinline {
call void @assume2(i64 %i.ext, i64 %a)
%a1 = add i64 %a, 25
%cmp1 = icmp ne i64 %i.ext, %a1
call void @assume2(i64 %i.ext, i64 %a1)
%a2 = add i64 %a1, 25
%cmp2 = icmp ne i64 %i.ext, %a2
call void @assume2(i64 %i.ext, i64 %a2)
%a3 = add i64 %a2, 25
%cmp3 = icmp ne i64 %i.ext, %a3
call void @assume2(i64 %i.ext, i64 %a3)
%a4 = add i64 %a3, 25
%cmp4 = icmp ne i64 %i.ext, %a4
call void @assume2(i64 %i.ext, i64 %a4)
ret void
}
define available_externally void @assume4(i64 %i.ext, i64 %a) alwaysinline {
call void @assume3(i64 %i.ext, i64 %a)
%a1 = add i64 %a, 125
%cmp1 = icmp ne i64 %i.ext, %a1
call void @assume3(i64 %i.ext, i64 %a1)
%a2 = add i64 %a1, 125
%cmp2 = icmp ne i64 %i.ext, %a2
call void @assume3(i64 %i.ext, i64 %a2)
%a3 = add i64 %a2, 125
%cmp3 = icmp ne i64 %i.ext, %a3
call void @assume3(i64 %i.ext, i64 %a3)
%a4 = add i64 %a3, 125
%cmp4 = icmp ne i64 %i.ext, %a4
call void @assume3(i64 %i.ext, i64 %a4)
ret void
}
define available_externally void @assume5(i64 %i.ext, i64 %a) alwaysinline {
call void @assume4(i64 %i.ext, i64 %a)
%a1 = add i64 %a, 625
%cmp1 = icmp ne i64 %i.ext, %a1
call void @assume4(i64 %i.ext, i64 %a1)
%a2 = add i64 %a1, 625
%cmp2 = icmp ne i64 %i.ext, %a2
call void @assume4(i64 %i.ext, i64 %a2)
%a3 = add i64 %a2, 625
%cmp3 = icmp ne i64 %i.ext, %a3
call void @assume4(i64 %i.ext, i64 %a3)
%a4 = add i64 %a3, 625
%cmp4 = icmp ne i64 %i.ext, %a4
call void @assume4(i64 %i.ext, i64 %a4)
ret void
}
define void @fn(i32 %init) {
entry:
br label %loop
loop:
%i = phi i32 [%init, %entry], [%next, %loop]
call void @body(i32 %i)
%i.ext = zext i32 %i to i64
call void @assume5(i64 %i.ext, i64 500000000)
%i.next = add i64 %i.ext, 1
%next = trunc i64 %i.next to i32
%done = icmp eq i32 %i, 500000000
br i1 %done, label %exit, label %loop
exit:
ret void
}
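As a rough sanity check on the test's size (a back-of-the-envelope sketch, not part of the commit): each helper level fans out into five calls to the level below, and the innermost level issues five assumes, so a fully inlined call to @assume5 contributes 5^5 = 3125 @llvm.assume calls per loop iteration. The pre-fix code consulted all of them ahead of the reentrancy guard, which is consistent with the hang and stack overflow described above.

// Back-of-the-envelope count of @llvm.assume calls inside the loop of @fn
// after -always-inline fully expands assume1..assume5.
#include <cstdio>

int main() {
  const int Levels = 5;   // assume1 .. assume5
  const int FanOut = 5;   // five calls (or five assumes) per body
  long long Assumes = 1;
  for (int I = 0; I < Levels; ++I)
    Assumes *= FanOut;
  std::printf("assume calls per iteration after inlining: %lld\n", Assumes);
  return 0;               // prints 3125
}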