diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 6357e1ff54d0..b3b26f211a92 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -145,12 +145,14 @@ namespace llvm {
   /// CannotBeNegativeZero - Return true if we can prove that the specified FP
   /// value is never equal to -0.0.
   ///
-  bool CannotBeNegativeZero(const Value *V, unsigned Depth = 0);
+  bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
+                            unsigned Depth = 0);
 
   /// CannotBeOrderedLessThanZero - Return true if we can prove that the
   /// specified FP value is either a NaN or never less than 0.0.
   ///
-  bool CannotBeOrderedLessThanZero(const Value *V, unsigned Depth = 0);
+  bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI,
+                                   unsigned Depth = 0);
 
   /// isBytewiseValue - If the specified value can be set by repeating the same
   /// byte in memory, return the i8 value that it is represented with.  This is
diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index 531803adf5e4..42636b8e1c0c 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -59,7 +59,8 @@ Intrinsic::ID checkBinaryFloatSignature(const CallInst &I,
 /// \brief Returns intrinsic ID for call.
 /// For the input call instruction it finds mapping intrinsic and returns
 /// its intrinsic ID, in case it does not found it return not_intrinsic.
-Intrinsic::ID getIntrinsicIDForCall(CallInst *CI, const TargetLibraryInfo *TLI);
+Intrinsic::ID getIntrinsicIDForCall(const CallInst *CI,
+                                    const TargetLibraryInfo *TLI);
 
 /// \brief Find the operand of the GEP that should be checked for consecutive
 /// stores. This ignores trailing indices that have no effect on the final
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 80b92b50a74a..cefa18712dd6 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -794,7 +794,7 @@ static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
 
   // fadd X, 0 ==> X, when we know X is not -0
   if (match(Op1, m_Zero()) &&
-      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0)))
+      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
     return Op0;
 
   // fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
@@ -830,7 +830,7 @@ static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
 
   // fsub X, -0 ==> X, when we know X is not -0
   if (match(Op1, m_NegZero()) &&
-      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0)))
+      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
     return Op0;
 
   // fsub -0.0, (fsub -0.0, X) ==> X
@@ -3112,7 +3112,14 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
   }
 
   // Handle fcmp with constant RHS
-  if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
+  const ConstantFP *CFP = nullptr;
+  if (const auto *RHSC = dyn_cast<Constant>(RHS)) {
+    if (RHS->getType()->isVectorTy())
+      CFP = dyn_cast_or_null<ConstantFP>(RHSC->getSplatValue());
+    else
+      CFP = dyn_cast<ConstantFP>(RHSC);
+  }
+  if (CFP) {
     // If the constant is a nan, see if we can fold the comparison based on it.
     if (CFP->getValueAPF().isNaN()) {
       if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo"
@@ -3120,7 +3127,7 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
       assert(FCmpInst::isUnordered(Pred) &&
              "Comparison must be either ordered or unordered!");
       // True if unordered.
-      return ConstantInt::getTrue(CFP->getContext());
+      return ConstantInt::get(GetCompareTy(LHS), 1);
     }
     // Check whether the constant is an infinity.
     if (CFP->getValueAPF().isInfinity()) {
@@ -3128,10 +3135,10 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
         switch (Pred) {
         case FCmpInst::FCMP_OLT:
           // No value is ordered and less than negative infinity.
-          return ConstantInt::getFalse(CFP->getContext());
+          return ConstantInt::get(GetCompareTy(LHS), 0);
         case FCmpInst::FCMP_UGE:
           // All values are unordered with or at least negative infinity.
-          return ConstantInt::getTrue(CFP->getContext());
+          return ConstantInt::get(GetCompareTy(LHS), 1);
         default:
           break;
         }
@@ -3139,10 +3146,10 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
         switch (Pred) {
         case FCmpInst::FCMP_OGT:
           // No value is ordered and greater than infinity.
-          return ConstantInt::getFalse(CFP->getContext());
+          return ConstantInt::get(GetCompareTy(LHS), 0);
         case FCmpInst::FCMP_ULE:
           // All values are unordered with and at most infinity.
-          return ConstantInt::getTrue(CFP->getContext());
+          return ConstantInt::get(GetCompareTy(LHS), 1);
         default:
           break;
         }
@@ -3151,13 +3158,13 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
     if (CFP->getValueAPF().isZero()) {
       switch (Pred) {
       case FCmpInst::FCMP_UGE:
-        if (CannotBeOrderedLessThanZero(LHS))
-          return ConstantInt::getTrue(CFP->getContext());
+        if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
+          return ConstantInt::get(GetCompareTy(LHS), 1);
         break;
       case FCmpInst::FCMP_OLT:
         // X < 0
-        if (CannotBeOrderedLessThanZero(LHS))
-          return ConstantInt::getFalse(CFP->getContext());
+        if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
+          return ConstantInt::get(GetCompareTy(LHS), 0);
         break;
       default:
         break;
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 6f8dc2c29370..757c3b6fb0d6 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -20,6 +20,7 @@
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/VectorUtils.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/ConstantRange.h"
 #include "llvm/IR/Constants.h"
@@ -2267,7 +2268,8 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
 /// NOTE: this function will need to be revisited when we support non-default
 /// rounding modes!
 ///
-bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
+bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
+                                unsigned Depth) {
   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
     return !CFP->getValueAPF().isNegZero();
 
@@ -2295,30 +2297,26 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
     return true;
 
-  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
+  if (const CallInst *CI = dyn_cast<CallInst>(I)) {
+    Intrinsic::ID IID = getIntrinsicIDForCall(CI, TLI);
+    switch (IID) {
+    default:
+      break;
     // sqrt(-0.0) = -0.0, no other negative results are possible.
-    if (II->getIntrinsicID() == Intrinsic::sqrt)
-      return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
-
-  if (const CallInst *CI = dyn_cast<CallInst>(I))
-    if (const Function *F = CI->getCalledFunction()) {
-      if (F->isDeclaration()) {
-        // abs(x) != -0.0
-        if (F->getName() == "abs") return true;
-        // fabs[lf](x) != -0.0
-        if (F->getName() == "fabs") return true;
-        if (F->getName() == "fabsf") return true;
-        if (F->getName() == "fabsl") return true;
-        if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
-            F->getName() == "sqrtl")
-          return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
-      }
+    case Intrinsic::sqrt:
+      return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
+    // fabs(x) != -0.0
+    case Intrinsic::fabs:
+      return true;
     }
+  }
 
   return false;
 }
 
-bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) {
+bool llvm::CannotBeOrderedLessThanZero(const Value *V,
+                                       const TargetLibraryInfo *TLI,
+                                       unsigned Depth) {
   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
     return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
 
@@ -2344,43 +2342,44 @@ bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) {
   case Instruction::FAdd:
   case Instruction::FDiv:
   case Instruction::FRem:
-    return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) &&
-           CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
+    return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
+           CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
   case Instruction::Select:
-    return CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1) &&
-           CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1);
+    return CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1) &&
+           CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
   case Instruction::FPExt:
   case Instruction::FPTrunc:
     // Widening/narrowing never change sign.
-    return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
-  case Instruction::Call:
-    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
-      switch (II->getIntrinsicID()) {
-      default: break;
-      case Intrinsic::maxnum:
-        return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) ||
-               CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
-      case Intrinsic::minnum:
-        return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) &&
-               CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
-      case Intrinsic::exp:
-      case Intrinsic::exp2:
-      case Intrinsic::fabs:
-      case Intrinsic::sqrt:
-        return true;
-      case Intrinsic::powi:
-        if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
-          // powi(x,n) is non-negative if n is even.
-          if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
-            return true;
-        }
-        return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
-      case Intrinsic::fma:
-      case Intrinsic::fmuladd:
-        // x*x+y is non-negative if y is non-negative.
-        return I->getOperand(0) == I->getOperand(1) &&
-               CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1);
-      }
+    return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
+  case Instruction::Call:
+    Intrinsic::ID IID = getIntrinsicIDForCall(cast<CallInst>(I), TLI);
+    switch (IID) {
+    default:
+      break;
+    case Intrinsic::maxnum:
+      return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) ||
+             CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
+    case Intrinsic::minnum:
+      return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
+             CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
+    case Intrinsic::exp:
+    case Intrinsic::exp2:
+    case Intrinsic::fabs:
+    case Intrinsic::sqrt:
+      return true;
+    case Intrinsic::powi:
+      if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
+        // powi(x,n) is non-negative if n is even.
+        if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
+          return true;
+      }
+      return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
+    case Intrinsic::fma:
+    case Intrinsic::fmuladd:
+      // x*x+y is non-negative if y is non-negative.
+      return I->getOperand(0) == I->getOperand(1) &&
+             CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
+    }
     break;
   }
   return false;
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 4a6cdf973e2e..cffe033ee657 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -121,10 +121,10 @@ llvm::checkBinaryFloatSignature(const CallInst &I,
 /// \brief Returns intrinsic ID for call.
 /// For the input call instruction it finds mapping intrinsic and returns
 /// its ID, in case it does not found it return not_intrinsic.
-Intrinsic::ID llvm::getIntrinsicIDForCall(CallInst *CI,
+Intrinsic::ID llvm::getIntrinsicIDForCall(const CallInst *CI,
                                           const TargetLibraryInfo *TLI) {
   // If we have an intrinsic call, check if it is trivially vectorizable.
-  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
+  if (const auto *II = dyn_cast<IntrinsicInst>(CI)) {
     Intrinsic::ID ID = II->getIntrinsicID();
     if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
         ID == Intrinsic::lifetime_end || ID == Intrinsic::assume)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index bbd118c67b82..9b963fd6f125 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4570,7 +4570,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
           break;
         // fabs(x) < 0 --> false
         case FCmpInst::FCMP_OLT:
-          return replaceInstUsesWith(I, Builder->getFalse());
+          llvm_unreachable("handled by SimplifyFCmpInst");
         // fabs(x) > 0 --> x != 0
         case FCmpInst::FCMP_OGT:
           return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
diff --git a/llvm/test/Transforms/InstCombine/pr27332.ll b/llvm/test/Transforms/InstCombine/pr27332.ll
new file mode 100644
index 000000000000..543ffbe1fa7a
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/pr27332.ll
@@ -0,0 +1,11 @@
+; RUN: opt -instcombine -S -o - < %s | FileCheck %s
+declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
+
+define <4 x i1> @test1(<4 x float> %V) {
+entry:
+  %abs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %V)
+  %cmp = fcmp olt <4 x float> %abs, zeroinitializer
+  ret <4 x i1> %cmp
+}
+; CHECK-LABEL: define <4 x i1> @test1(
+; CHECK:  ret <4 x i1> zeroinitializer
diff --git a/llvm/test/Transforms/InstCombine/zero-point-zero-add.ll b/llvm/test/Transforms/InstCombine/zero-point-zero-add.ll
index 42004017c399..e466e8ad7429 100644
--- a/llvm/test/Transforms/InstCombine/zero-point-zero-add.ll
+++ b/llvm/test/Transforms/InstCombine/zero-point-zero-add.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by update_test_checks.py
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
-declare double @abs(double)
+declare double @fabs(double) readonly
 
 define double @test(double %X) {
 ; CHECK-LABEL: @test(
@@ -15,10 +15,10 @@ define double @test(double %X) {
 
 define double @test1(double %X) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    [[Y:%.*]] = call double @abs(double %X)
+; CHECK-NEXT:    [[Y:%.*]] = call double @fabs(double %X)
 ; CHECK-NEXT:    ret double [[Y]]
 ;
-  %Y = call double @abs(double %X)
+  %Y = call double @fabs(double %X)
   %Z = fadd double %Y, 0.0
   ret double %Z
 }