diff --git a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
index 52567cb24147..5c3b7c40909a 100644
--- a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -10894,15 +10894,29 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
   if (isa<CastInst>(FirstInst)) {
     CastSrcTy = FirstInst->getOperand(0)->getType();
+
+    // Be careful about transforming integer PHIs.  We don't want to pessimize
+    // the code by turning an i32 into an i1293.
+    if (isa<IntegerType>(PN.getType()) && isa<IntegerType>(CastSrcTy)) {
+      // If we don't have TD, we don't know if the original PHI was legal.
+      if (!TD) return 0;
+
+      unsigned PHIWidth = PN.getType()->getPrimitiveSizeInBits();
+      unsigned NewWidth = CastSrcTy->getPrimitiveSizeInBits();
+      bool PHILegal = TD->isLegalInteger(PHIWidth);
+      bool NewLegal = TD->isLegalInteger(NewWidth);
 
-    // If this is a legal integer PHI node, and pulling the operation through
-    // would cause it to be an illegal integer PHI, don't do the transformation.
-    if (!TD ||
-        (isa<IntegerType>(PN.getType()) &&
-         isa<IntegerType>(CastSrcTy) &&
-         TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()) &&
-         !TD->isLegalInteger(CastSrcTy->getPrimitiveSizeInBits())))
-      return 0;
+      // If this is a legal integer PHI node, and pulling the operation through
+      // would cause it to be an illegal integer PHI, don't do the
+      // transformation.
+      if (PHILegal && !NewLegal)
+        return 0;
+
+      // Otherwise, if both are illegal, do not increase the size of the PHI. We
+      // do allow things like i160 -> i64, but not i64 -> i160.
+      if (!PHILegal && !NewLegal && NewWidth > PHIWidth)
+        return 0;
+    }
   } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
     // Can fold binop, compare or shift here if the RHS is a constant,
     // otherwise call FoldPHIArgBinOpIntoPHI.
@@ -11075,6 +11089,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &PN) {
   // extracted out of it.  First, sort the users by their offset and size.
   array_pod_sort(PHIUsers.begin(), PHIUsers.end());
 
+  DEBUG(errs() << "SLICING UP PHI: " << PN << '\n');
 
   DenseMap<BasicBlock*, Value*> PredValues;
@@ -11088,6 +11103,8 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &PN) {
     // Create the new PHI node for this user.
     PHINode *EltPHI = PHINode::Create(Ty, PN.getName()+".off"+Twine(Offset), &PN);
+    assert(EltPHI->getType() != PN.getType() &&
+           "Truncate didn't shrink phi?");
 
     for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
       BasicBlock *Pred = PN.getIncomingBlock(i);
@@ -11118,6 +11135,9 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &PN) {
     }
     PredValues.clear();
 
+    DEBUG(errs() << " Made element PHI for offset " << Offset << ": "
+                 << *EltPHI << '\n');
+
     // Now that we have a new PHI node, replace all uses of this piece of the
     // PHI with the one new PHI.
     while (PHIUsers[UserI].Shift == Offset &&
@@ -11241,7 +11261,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
   // it is only used by trunc or trunc(lshr) operations.  If so, we split the
   // PHI into the various pieces being extracted.  This sort of thing is
   // introduced when SROA promotes an aggregate to a single large integer type.
-  if (0 && isa<IntegerType>(PN.getType()) && TD &&
+  if (isa<IntegerType>(PN.getType()) && TD &&
       !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
     if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
       return Res;
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 86e920c7229a..9164d0b78f60 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -245,12 +245,12 @@ end:
   %tmp2 = add i64 %tmp32, %tmp30
   ret i64 %tmp2
-; HECK: @test12
-; HECK-NOT: zext
-; HECK: end:
-; HECK-NEXT: phi i64 [ 0, %entry ], [ %Val, %two ]
-; HECK-NOT: phi
-; HECK: ret i64
+; CHECK: @test12
+; CHECK-NOT: zext
+; CHECK: end:
+; CHECK-NEXT: phi i64 [ 0, %entry ], [ %Val, %two ]
+; CHECK-NOT: phi
+; CHECK: ret i64
 }
 
 declare void @test13f(double, i32)
@@ -276,11 +276,44 @@ end:
   call void @test13f(double %tmp31, i32 %tmp32)
   ret void
-; HECK: @test13
-; HECK-NOT: zext
-; HECK: end:
-; HECK-NEXT: phi double [ 0.000000e+00, %entry ], [ %Vald, %two ]
-; HECK-NEXT: call void @test13f(double {{[^,]*}}, i32 %V1)
-; HECK: ret void
+; CHECK: @test13
+; CHECK-NOT: zext
+; CHECK: end:
+; CHECK-NEXT: phi double [ 0.000000e+00, %entry ], [ %Vald, %two ]
+; CHECK-NEXT: call void @test13f(double {{[^,]*}}, i32 %V1)
+; CHECK: ret void
 }
+
+define i640 @test14a(i320 %A, i320 %B, i1 %b1) {
+BB0:
+  %a = zext i320 %A to i640
+  %b = zext i320 %B to i640
+  br label %Loop
+
+Loop:
+  %C = phi i640 [ %a, %BB0 ], [ %b, %Loop ]
+  br i1 %b1, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+  ret i640 %C
+; CHECK: @test14a
+; CHECK: Loop:
+; CHECK-NEXT: phi i320
+}
+
+define i160 @test14b(i320 %A, i320 %B, i1 %b1) {
+BB0:
+  %a = trunc i320 %A to i160
+  %b = trunc i320 %B to i160
+  br label %Loop
+
+Loop:
+  %C = phi i160 [ %a, %BB0 ], [ %b, %Loop ]
+  br i1 %b1, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+  ret i160 %C
+; CHECK: @test14b
+; CHECK: Loop:
+; CHECK-NEXT: phi i160
+}
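
Note on the new tests (illustrative, not part of the patch): the @test14a CHECK
lines only require that the illegal i640 PHI in %Loop be shrunk to an i320 PHI.
A sketch of the IR shape the fold is aiming for is below; the value name %C.in
and the exact placement of the zext are assumptions, not verbatim instcombine
output.

define i640 @test14a(i320 %A, i320 %B, i1 %b1) {
BB0:
  br label %Loop

Loop:
  ; The zext has been pulled through the PHI: the PHI now carries the narrower
  ; i320 value, and a single zext of its result rebuilds the original i640.
  %C.in = phi i320 [ %A, %BB0 ], [ %B, %Loop ]
  %C = zext i320 %C.in to i640
  br i1 %b1, label %Loop, label %Exit

Exit:
  ret i640 %C
}

@test14b exercises the opposite direction: pulling the trunc through would grow
the illegal PHI from i160 to i320, which the new NewWidth > PHIWidth guard
rejects, so its CHECK lines require the PHI to stay i160.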