[AMDGPU] Use AssumptionCacheTracker in the divrem32 expansion
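
ComputeNumSignBits only looks through llvm.assume when it is given an
AssumptionCache and a context instruction. Wire AssumptionCacheTracker into
the pass and hand the div/rem instruction itself to expandDivRem24/32, so
that an assume such as `icmp ult i32 %arg1, 42` proves the operands have at
least 9 sign bits and the cheaper 24-bit expansion applies (see the new
test).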

Differential Revision: https://reviews.llvm.org/D49761

llvm-svn: 337938
Author: Stanislav Mekhanoshin
Date:   2018-07-25 17:02:11 +00:00
parent cbb4313b1c
commit 7e7268ac1c
2 changed files with 64 additions and 13 deletions


@@ -17,6 +17,7 @@
 #include "AMDGPUSubtarget.h"
 #include "AMDGPUTargetMachine.h"
 #include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/DivergenceAnalysis.h"
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -58,6 +59,7 @@ static cl::opt<bool> WidenLoads(
 class AMDGPUCodeGenPrepare : public FunctionPass,
                              public InstVisitor<AMDGPUCodeGenPrepare, bool> {
   const GCNSubtarget *ST = nullptr;
+  AssumptionCache *AC = nullptr;
   DivergenceAnalysis *DA = nullptr;
   Module *Mod = nullptr;
   bool HasUnsafeFPMath = false;
@@ -134,11 +136,12 @@ class AMDGPUCodeGenPrepare : public FunctionPass,
   bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;
 
   /// Expands 24 bit div or rem.
-  Value* expandDivRem24(IRBuilder<> &Builder, Value *Num, Value *Den,
+  Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
+                        Value *Num, Value *Den,
                         bool IsDiv, bool IsSigned) const;
 
   /// Expands 32 bit div or rem.
-  Value* expandDivRem32(IRBuilder<> &Builder, Instruction::BinaryOps Opc,
+  Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                         Value *Num, Value *Den) const;
 
   /// Widen a scalar load.
@@ -173,6 +176,7 @@ public:
   StringRef getPassName() const override { return "AMDGPU IR optimizations"; }
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AssumptionCacheTracker>();
     AU.addRequired<DivergenceAnalysis>();
     AU.setPreservesAll();
   }
@@ -500,16 +504,17 @@ static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
 // The fractional part of a float is enough to accurately represent up to
 // a 24-bit signed integer.
 Value* AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
+                                            BinaryOperator &I,
                                             Value *Num, Value *Den,
                                             bool IsDiv, bool IsSigned) const {
   assert(Num->getType()->isIntegerTy(32));
 
   const DataLayout &DL = Mod->getDataLayout();
-  unsigned LHSSignBits = ComputeNumSignBits(Num, DL);
+  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
   if (LHSSignBits < 9)
     return nullptr;
 
-  unsigned RHSSignBits = ComputeNumSignBits(Den, DL);
+  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
   if (RHSSignBits < 9)
     return nullptr;
 
@@ -603,8 +608,9 @@ Value* AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
 }
 
 Value* AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
-                                            Instruction::BinaryOps Opc,
+                                            BinaryOperator &I,
                                             Value *Num, Value *Den) const {
+  Instruction::BinaryOps Opc = I.getOpcode();
   assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
          Opc == Instruction::SRem || Opc == Instruction::SDiv);
 
@@ -632,7 +638,7 @@ Value* AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
     }
   }
 
-  if (Value *Res = expandDivRem24(Builder, Num, Den, IsDiv, IsSigned)) {
+  if (Value *Res = expandDivRem24(Builder, I, Num, Den, IsDiv, IsSigned)) {
     Res = Builder.CreateTrunc(Res, Ty);
     return Res;
   }
@@ -767,16 +773,16 @@ bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
   if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
     NewDiv = UndefValue::get(VT);
 
-    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
-      Value *NumEltI = Builder.CreateExtractElement(Num, I);
-      Value *DenEltI = Builder.CreateExtractElement(Den, I);
-      Value *NewElt = expandDivRem32(Builder, Opc, NumEltI, DenEltI);
+    for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
+      Value *NumEltN = Builder.CreateExtractElement(Num, N);
+      Value *DenEltN = Builder.CreateExtractElement(Den, N);
+      Value *NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
       if (!NewElt)
-        NewElt = Builder.CreateBinOp(Opc, NumEltI, DenEltI);
-      NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, I);
+        NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
+      NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
     }
   } else {
-    NewDiv = expandDivRem32(Builder, Opc, Num, Den);
+    NewDiv = expandDivRem32(Builder, I, Num, Den);
   }
 
   if (NewDiv) {
@@ -891,6 +897,7 @@ bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
   const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
   ST = &TM.getSubtarget<GCNSubtarget>(F);
+  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
   DA = &getAnalysis<DivergenceAnalysis>();
   HasUnsafeFPMath = hasUnsafeFPMath(F);
   AMDGPUASI = TM.getAMDGPUAS();
 
@@ -910,6 +917,7 @@ bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
 INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                       "AMDGPU IR optimizations", false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
 INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
 INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                     false, false)
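
The three hunks above are the standard legacy-pass-manager wiring for AssumptionCacheTracker: declare the dependency in getAnalysisUsage, fetch the per-function cache in runOnFunction, and register it with INITIALIZE_PASS_DEPENDENCY. What the cache buys is sharper ComputeNumSignBits answers. A minimal standalone C++ sketch of that sign-bit reasoning, as an illustration only (the helper name is made up, and C++20's std::countl_zero stands in for ValueTracking):

#include <bit>
#include <cstdint>
#include <cstdio>

// With assume(x ult Bound), x is known to be at most Bound-1, so it has at
// least countl_zero(Bound-1) leading zero bits; for a value known to be
// non-negative, leading zeros are exactly the "sign bits" that
// expandDivRem24 queries ComputeNumSignBits for.
static unsigned signBitsGivenUltBound(uint32_t bound) {
  uint32_t maxVal = bound - 1;      // largest value the assume permits (bound > 0)
  return std::countl_zero(maxVal);  // e.g. countl_zero(41) == 26
}

int main() {
  // The new test asserts %arg1 ult 42, so the denominator is known to fit
  // in 6 bits: 26 sign bits, comfortably above the pass's threshold of 9.
  std::printf("%u\n", signBitsGivenUltBound(42)); // prints 26
}

Without the cache and a context instruction, ComputeNumSignBits sees an unconstrained i32 argument (1 sign bit) and expandDivRem24 bails out.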


@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-- -amdgpu-codegenprepare %s | FileCheck %s
+
+define amdgpu_kernel void @divrem24_assume(i32 addrspace(1)* %arg, i32 %arg1) {
+; CHECK-LABEL: @divrem24_assume(
+; CHECK-NEXT:  bb:
+; CHECK-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[ARG1:%.*]], 42
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP2]])
+; CHECK-NEXT:    [[TMP0:%.*]] = uitofp i32 [[TMP]] to float
+; CHECK-NEXT:    [[TMP1:%.*]] = uitofp i32 [[ARG1]] to float
+; CHECK-NEXT:    [[TMP2:%.*]] = fdiv fast float 1.000000e+00, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast float [[TMP0]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.trunc.f32(float [[TMP3]])
+; CHECK-NEXT:    [[TMP5:%.*]] = fsub fast float -0.000000e+00, [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP5]], float [[TMP1]], float [[TMP0]])
+; CHECK-NEXT:    [[TMP7:%.*]] = fptoui float [[TMP4]] to i32
+; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
+; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.fabs.f32(float [[TMP1]])
+; CHECK-NEXT:    [[TMP10:%.*]] = fcmp fast oge float [[TMP8]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP10]], i32 1, i32 0
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP7]], [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = and i32 [[TMP12]], 1023
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[ARG:%.*]], i64 [[TMP4]]
+; CHECK-NEXT:    store i32 0, i32 addrspace(1)* [[TMP5]], align 4
+; CHECK-NEXT:    ret void
+;
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
+  %tmp2 = icmp ult i32 %arg1, 42
+  tail call void @llvm.assume(i1 %tmp2)
+  %tmp3 = udiv i32 %tmp, %arg1
+  %tmp4 = zext i32 %tmp3 to i64
+  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp4
+  store i32 0, i32 addrspace(1)* %tmp5, align 4
+  ret void
+}
+
+declare void @llvm.assume(i1)
+declare i32 @llvm.amdgcn.workitem.id.x()
+
+!0 = !{i32 0, i32 1024}
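
For reference, the expanded sequence in the CHECK lines maps onto this standalone C++ sketch. It is an illustration under stated assumptions, not the backend's semantics: plain IEEE float operations stand in for the fast-math/ftz AMDGPU intrinsics, the helper name is made up, and correctness is only spot-checked over the ranges the test guarantees rather than proven for all 24-bit inputs:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Mirrors the IR above: q = trunc(num * (1/den)) via float, then add 1 if
// the back-multiplied remainder num - q*den is at least den in magnitude.
static uint32_t udiv24(uint32_t num, uint32_t den) {
  float fn = (float)num;                    // uitofp
  float fd = (float)den;                    // uitofp
  float fq = std::trunc(fn * (1.0f / fd));  // fdiv fast + fmul fast + llvm.trunc
  float err = std::fma(-fq, fd, fn);        // fsub -0.0, q then fmad(q', den, num)
  uint32_t q = (uint32_t)fq;                // fptoui
  q += (std::fabs(err) >= std::fabs(fd));   // fabs/fcmp oge/select/add fix-up
  return q;
}

int main() {
  for (uint32_t n = 0; n < 1024; ++n)       // numerator range from !range !0
    for (uint32_t d = 1; d < 42; ++d)       // denominator range from the assume
      assert(udiv24(n, d) == n / d);
  std::printf("ok\n");
}

The final `and i32 [[TMP12]], 1023` has no analogue in the sketch: the quotient can be no wider than the numerator, which !range !0 bounds below 1024, so the pass truncates the result to 10 bits.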