[PM] Keep the inline history when the original SCC splits and then merges back
into itself in the same round of SCC update.

In https://reviews.llvm.org/rL309784, inline history was added to prevent
infinite inlining across multiple runs of the inliner and SCC updates, but the
history is only kept when a new SCC is actually generated during the SCC update.

We found a case where an SCC can be split and then merged back into itself in
the same round of SCC update, so the same SCC is popped from UR.CWorklist and
then added back immediately, without any new SCC being generated. That is why
the existing patch cannot catch this infinite inlining case.

What this patch does: even if no new SCC is generated, keep the inline history
as long as the current SCC appears in UR.CWorklist again.
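Concretely, the check in InlinerPass::run that decides whether to retain the
inline history (shown in the hunk below) goes from testing C != OldC alone to
also consulting UR.CWorklist. A condensed before/after sketch of that
condition, with the body and surrounding code elided:

  // Before: keep the history only when a new SCC was generated.
  if (C != OldC &&
      llvm::any_of(InlinedCallees, [&](Function *Callee) {
        return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
      })) {
    // ... keep the inline history (elided) ...
  }

  // After: also keep it when the original SCC was re-queued, i.e. it was
  // split and merged back into itself within this round of SCC update.
  if ((C != OldC || UR.CWorklist.count(OldC)) &&
      llvm::any_of(InlinedCallees, [&](Function *Callee) {
        return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
      })) {
    // ... keep the inline history (elided) ...
  }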

Differential Revision: https://reviews.llvm.org/D52915

llvm-svn: 345103
Wei Mi 2018-10-23 23:29:45 +00:00
parent 4f82406c46
commit 80a0c97e07
3 changed files with 128 additions and 40 deletions


@@ -1158,10 +1158,19 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// SCC splits and merges. To avoid this, we capture the originating caller
// node and the SCC containing the call edge. This is a slight over
// approximation of the possible inlining decisions that must be avoided,
// but is relatively efficient to store.
// but is relatively efficient to store. We use C != OldC to know when
// a new SCC is generated and the original SCC may be re-formed via merges
// in later iterations.
//
// It is also possible that, even if no new SCC is generated
// (i.e., C == OldC), the original SCC could be split and then merged
// back into itself; in that case the original SCC is added to
// UR.CWorklist again, and we want to catch such cases too.
//
// FIXME: This seems like a very heavyweight way of retaining the inline
// history, we should look for a more efficient way of tracking it.
if (C != OldC && llvm::any_of(InlinedCallees, [&](Function *Callee) {
if ((C != OldC || UR.CWorklist.count(OldC)) &&
llvm::any_of(InlinedCallees, [&](Function *Callee) {
return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
})) {
LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "


@@ -5,7 +5,7 @@
; some out-of-band way to prevent infinitely re-inlining and re-transforming the
; code.
;
; RUN: opt < %s -passes='cgscc(inline,function(sroa,instcombine))' -S | FileCheck %s
; RUN: opt < %s -passes='cgscc(inline,function(sroa,instcombine))' -inline-threshold=50 -S | FileCheck %s
; The `test1_*` collection of functions form a directly cycling pattern.
@@ -123,3 +123,110 @@ bb2:
ret void
}
; Another infinite inlining case. The initial callgraph looks like this:
;
;         test3_a <---> test3_b
;            |             ^
;            v             |
;         test3_c <---> test3_d
;
; Of all the call edges in the call graph, only the calls to test3_c and
; test3_d can be inlined into test3_a; no other call edge can be inlined.
;
; After test3_c is inlined into test3_a, the original call edge
; test3_a->test3_c is removed, a new call edge test3_a->test3_d is added, and
; the call graph becomes:
;
;         test3_a <---> test3_b
;               \           ^
;                v         /
;         test3_c <---> test3_d
; But test3_a, test3_b, test3_c and test3_d still belong to the same SCC.
;
; Then test3_a->test3_d is inlined. When test3_a->test3_d is converted to a
; ref edge, the original SCC is split into two: {test3_c, test3_d} and
; {test3_a, test3_b}. Immediately afterwards, the newly added ref edge
; test3_a->test3_c is converted to a call edge, and the two SCCs are merged
; back into the original one. During this cycle, the original SCC is added to
; UR.CWorklist again, and this creates an infinite inlining loop.
@a = global i64 0
@b = global i64 0
define void @test3_c(i32 %i) {
entry:
%cmp = icmp eq i32 %i, 5
br i1 %cmp, label %if.end, label %if.then
if.then: ; preds = %entry
%call = tail call i64 @random()
%t0 = load i64, i64* @a
%add = add nsw i64 %t0, %call
store i64 %add, i64* @a
br label %if.end
if.end: ; preds = %entry, %if.then
tail call void @test3_d(i32 %i)
%t6 = load i64, i64* @a
%add85 = add nsw i64 %t6, 1
store i64 %add85, i64* @a
ret void
}
declare i64 @random()
define void @test3_d(i32 %i) {
entry:
%cmp = icmp eq i32 %i, 5
br i1 %cmp, label %if.end, label %if.then
if.then: ; preds = %entry
%call = tail call i64 @random()
%t0 = load i64, i64* @a
%add = add nsw i64 %t0, %call
store i64 %add, i64* @a
br label %if.end
if.end: ; preds = %entry, %if.then
tail call void @test3_c(i32 %i)
tail call void @test3_b()
%t6 = load i64, i64* @a
%add79 = add nsw i64 %t6, 3
store i64 %add79, i64* @a
ret void
}
; Function Attrs: noinline
define void @test3_b() #0 {
entry:
tail call void @test3_a()
%t0 = load i64, i64* @a
%add = add nsw i64 %t0, 2
store i64 %add, i64* @a
ret void
}
; Check test3_c is inlined into test3_a once and only once.
; CHECK-LABEL: @test3_a(
; CHECK: tail call void @test3_b()
; CHECK-NEXT: tail call void @test3_d(i32 5)
; CHECK-NEXT: %[[LD1:.*]] = load i64, i64* @a
; CHECK-NEXT: %[[ADD1:.*]] = add nsw i64 %[[LD1]], 1
; CHECK-NEXT: store i64 %[[ADD1]], i64* @a
; CHECK-NEXT: %[[LD2:.*]] = load i64, i64* @b
; CHECK-NEXT: %[[ADD2:.*]] = add nsw i64 %[[LD2]], 5
; CHECK-NEXT: store i64 %[[ADD2]], i64* @b
; CHECK-NEXT: ret void
; Function Attrs: noinline
define void @test3_a() #0 {
entry:
tail call void @test3_b()
tail call void @test3_c(i32 5)
%t0 = load i64, i64* @b
%add = add nsw i64 %t0, 5
store i64 %add, i64* @b
ret void
}
attributes #0 = { noinline }


@@ -154,11 +154,7 @@ if.end3:
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi2EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
; NEW: call void @_Z1fILb1ELi3EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
; NEW-NOT: call
@@ -198,19 +194,11 @@ if.end3:
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
; NEW: call void @_Z1fILb1ELi3EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
; NEW: call void @_Z1fILb1ELi3EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
; NEW-NOT: call
@@ -260,7 +248,7 @@ if.end3:
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi4EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
; NEW: call void @_Z1fILb0ELi4EEvPbS0_(
; NEW-NOT: call
define void @_Z1fILb0ELi2EEvPbS0_(i8* %B, i8* %E) {
entry:
@@ -304,21 +292,13 @@ if.end3:
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW: call void @_Z1fILb1ELi4EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
; NEW: call void @_Z1fILb0ELi4EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
; NEW: call void @_Z1fILb1ELi4EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
; NEW: call void @_Z1fILb0ELi4EEvPbS0_(
; NEW-NOT: call
define void @_Z1fILb1ELi2EEvPbS0_(i8* %B, i8* %E) {
entry:
@@ -433,15 +413,7 @@ entry:
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi1EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi2EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1gi(
; NEW-NOT: call
; NEW: call void @_Z1fILb1ELi0EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi0EEvPbS0_(
; NEW-NOT: call
; NEW: call void @_Z1fILb0ELi3EEvPbS0_(
; NEW: call void @_Z1fILb0ELi1EEvPbS0_(
; NEW-NOT: call
define void @_Z1fILb1ELi4EEvPbS0_(i8* %B, i8* %E) {
entry: