TTI: Improve default costs for addrspacecast
Multiple places need to compute this cost, and the variant used by the loop unroller and inliner was not handling addrspacecast. Also introduce a new wrapper to be slightly more precise: on AMDGPU, some addrspacecasts are free but are not no-ops. llvm-svn: 362436
This commit is contained in:
parent
bf37536a35
commit
8dbeb9256c
|
@@ -413,6 +413,12 @@ public:
|
|||
if (TLI->isZExtFree(OpTy, Ty))
|
||||
return TargetTransformInfo::TCC_Free;
|
||||
return TargetTransformInfo::TCC_Basic;
|
||||
|
||||
case Instruction::AddrSpaceCast:
|
||||
if (TLI->isFreeAddrSpaceCast(OpTy->getPointerAddressSpace(),
|
||||
Ty->getPointerAddressSpace()))
|
||||
return TargetTransformInfo::TCC_Free;
|
||||
return TargetTransformInfo::TCC_Basic;
|
||||
}
|
||||
|
||||
return BaseT::getOperationCost(Opcode, Ty, OpTy);
|
||||
|
@@ -656,7 +662,7 @@ public:
|
|||
return 0;
|
||||
|
||||
if (Opcode == Instruction::AddrSpaceCast &&
|
||||
TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
|
||||
TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
|
||||
Dst->getPointerAddressSpace()))
|
||||
return 0;
|
||||
|
||||
|
|
|
@@ -1591,8 +1591,9 @@ public:
|
|||
}
|
||||
|
||||
/// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
|
||||
/// are happy to sink it into basic blocks.
|
||||
virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
|
||||
/// are happy to sink it into basic blocks. A cast may be free, but not
|
||||
/// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
|
||||
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
|
||||
return isNoopAddrSpaceCast(SrcAS, DestAS);
|
||||
}
|
||||
|
||||
|
|
|
@@ -1140,8 +1140,8 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
|
|||
// Sink only "cheap" (or nop) address-space casts. This is a weaker condition
|
||||
// than sinking only nop casts, but is helpful on some platforms.
|
||||
if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
|
||||
if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(),
|
||||
ASC->getDestAddressSpace()))
|
||||
if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
|
||||
ASC->getDestAddressSpace()))
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@@ -1261,8 +1261,8 @@ bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
|
|||
return I && I->getMetadata("amdgpu.noclobber");
|
||||
}
|
||||
|
||||
bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
|
||||
unsigned DestAS) const {
|
||||
bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
|
||||
unsigned DestAS) const {
|
||||
// Flat -> private/local is a simple truncate.
|
||||
// Flat -> global is no-op
|
||||
if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
|
||||
|
|
|
@@ -246,7 +246,7 @@ public:
|
|||
bool isMemOpUniform(const SDNode *N) const;
|
||||
bool isMemOpHasNoClobberedMemOperand(const SDNode *N) const;
|
||||
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
|
||||
bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
|
||||
bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
|
||||
|
||||
TargetLoweringBase::LegalizeTypeAction
|
||||
getPreferredVectorAction(MVT VT) const override;
|
||||
|
|
|
@@ -1,45 +1,66 @@
|
|||
; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri < %s | FileCheck %s
|
||||
|
||||
; CHECK: 'addrspacecast_global_to_flat'
|
||||
; CHECK-LABEL: 'addrspacecast_global_to_flat'
|
||||
; CHECK: estimated cost of 0 for {{.*}} addrspacecast i8 addrspace(1)* %ptr to i8*
|
||||
define i8* @addrspacecast_global_to_flat(i8 addrspace(1)* %ptr) #0 {
|
||||
%cast = addrspacecast i8 addrspace(1)* %ptr to i8*
|
||||
ret i8* %cast
|
||||
}
|
||||
|
||||
; CHECK: 'addrspacecast_global_to_flat_v2'
|
||||
; CHECK-LABEL: 'addrspacecast_global_to_flat_v2'
|
||||
; CHECK: estimated cost of 0 for {{.*}} addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8*>
|
||||
define <2 x i8*> @addrspacecast_global_to_flat_v2(<2 x i8 addrspace(1)*> %ptr) #0 {
|
||||
%cast = addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8*>
|
||||
ret <2 x i8*> %cast
|
||||
}
|
||||
|
||||
; CHECK: 'addrspacecast_global_to_flat_v32'
|
||||
; CHECK-LABEL: 'addrspacecast_global_to_flat_v32'
|
||||
; CHECK: estimated cost of 0 for {{.*}} addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8*>
|
||||
define <32 x i8*> @addrspacecast_global_to_flat_v32(<32 x i8 addrspace(1)*> %ptr) #0 {
|
||||
%cast = addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8*>
|
||||
ret <32 x i8*> %cast
|
||||
}
|
||||
|
||||
; CHECK: 'addrspacecast_local_to_flat'
|
||||
; CHECK-LABEL: 'addrspacecast_local_to_flat'
|
||||
; CHECK: estimated cost of 1 for {{.*}} addrspacecast i8 addrspace(3)* %ptr to i8*
|
||||
define i8* @addrspacecast_local_to_flat(i8 addrspace(3)* %ptr) #0 {
|
||||
%cast = addrspacecast i8 addrspace(3)* %ptr to i8*
|
||||
ret i8* %cast
|
||||
}
|
||||
|
||||
; CHECK: 'addrspacecast_local_to_flat_v2'
|
||||
; CHECK-LABEL: 'addrspacecast_local_to_flat_v2'
|
||||
; CHECK: estimated cost of 2 for {{.*}} addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8*>
|
||||
define <2 x i8*> @addrspacecast_local_to_flat_v2(<2 x i8 addrspace(3)*> %ptr) #0 {
|
||||
%cast = addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8*>
|
||||
ret <2 x i8*> %cast
|
||||
}
|
||||
|
||||
; CHECK: 'addrspacecast_local_to_flat_v32'
|
||||
; CHECK-LABEL: 'addrspacecast_local_to_flat_v32'
|
||||
; CHECK: estimated cost of 32 for {{.*}} addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8*>
|
||||
define <32 x i8*> @addrspacecast_local_to_flat_v32(<32 x i8 addrspace(3)*> %ptr) #0 {
|
||||
%cast = addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8*>
|
||||
ret <32 x i8*> %cast
|
||||
}
|
||||
|
||||
; CHECK-LABEL: 'addrspacecast_flat_to_local'
|
||||
; CHECK: estimated cost of 0 for {{.*}} addrspacecast i8* %ptr to i8 addrspace(3)*
|
||||
define i8 addrspace(3)* @addrspacecast_flat_to_local(i8* %ptr) #0 {
|
||||
%cast = addrspacecast i8* %ptr to i8 addrspace(3)*
|
||||
ret i8 addrspace(3)* %cast
|
||||
}
|
||||
|
||||
; CHECK-LABEL: 'addrspacecast_flat_to_local_v2'
|
||||
; CHECK: estimated cost of 0 for {{.*}} addrspacecast <2 x i8*> %ptr to <2 x i8 addrspace(3)*>
|
||||
define <2 x i8 addrspace(3)*> @addrspacecast_flat_to_local_v2(<2 x i8*> %ptr) #0 {
|
||||
%cast = addrspacecast <2 x i8*> %ptr to <2 x i8 addrspace(3)*>
|
||||
ret <2 x i8 addrspace(3)*> %cast
|
||||
}
|
||||
|
||||
; CHECK-LABEL: 'addrspacecast_flat_to_local_v32'
|
||||
; CHECK: estimated cost of 0 for {{.*}} addrspacecast <32 x i8*> %ptr to <32 x i8 addrspace(3)*>
|
||||
define <32 x i8 addrspace(3)*> @addrspacecast_flat_to_local_v32(<32 x i8*> %ptr) #0 {
|
||||
%cast = addrspacecast <32 x i8*> %ptr to <32 x i8 addrspace(3)*>
|
||||
ret <32 x i8 addrspace(3)*> %cast
|
||||
}
|
||||
|
||||
attributes #0 = { nounwind readnone }
|
||||
|
|
|
@@ -0,0 +1,77 @@
|
|||
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -loop-unroll -unroll-threshold=75 -unroll-peel-count=0 -unroll-allow-partial=false -unroll-max-iteration-count-to-analyze=16 < %s | FileCheck %s
|
||||
|
||||
; CHECK-LABEL: @test_func_addrspacecast_cost_noop(
|
||||
; CHECK-NOT: br i1
|
||||
define amdgpu_kernel void @test_func_addrspacecast_cost_noop(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture %in) #0 {
|
||||
entry:
|
||||
br label %for.body
|
||||
|
||||
for.body:
|
||||
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
|
||||
%sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
|
||||
%arrayidx.in = getelementptr inbounds float, float addrspace(1)* %in, i32 %indvars.iv
|
||||
%arrayidx.out = getelementptr inbounds float, float addrspace(1)* %out, i32 %indvars.iv
|
||||
%cast.in = addrspacecast float addrspace(1)* %arrayidx.in to float*
|
||||
%cast.out = addrspacecast float addrspace(1)* %arrayidx.out to float*
|
||||
%load = load float, float* %cast.in
|
||||
%fmul = fmul float %load, %sum.02
|
||||
store float %fmul, float* %cast.out
|
||||
%indvars.iv.next = add i32 %indvars.iv, 1
|
||||
%exitcond = icmp eq i32 %indvars.iv.next, 16
|
||||
br i1 %exitcond, label %for.end, label %for.body
|
||||
|
||||
for.end:
|
||||
ret void
|
||||
}
|
||||
|
||||
; Free, but not a no-op
|
||||
; CHECK-LABEL: @test_func_addrspacecast_cost_free(
|
||||
; CHECK-NOT: br i1
|
||||
define amdgpu_kernel void @test_func_addrspacecast_cost_free(float* noalias nocapture %out, float* noalias nocapture %in) #0 {
|
||||
entry:
|
||||
br label %for.body
|
||||
|
||||
for.body:
|
||||
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
|
||||
%sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
|
||||
%arrayidx.in = getelementptr inbounds float, float* %in, i32 %indvars.iv
|
||||
%arrayidx.out = getelementptr inbounds float, float* %out, i32 %indvars.iv
|
||||
%cast.in = addrspacecast float* %arrayidx.in to float addrspace(3)*
|
||||
%cast.out = addrspacecast float* %arrayidx.out to float addrspace(3)*
|
||||
%load = load float, float addrspace(3)* %cast.in
|
||||
%fmul = fmul float %load, %sum.02
|
||||
store float %fmul, float addrspace(3)* %cast.out
|
||||
%indvars.iv.next = add i32 %indvars.iv, 1
|
||||
%exitcond = icmp eq i32 %indvars.iv.next, 16
|
||||
br i1 %exitcond, label %for.end, label %for.body
|
||||
|
||||
for.end:
|
||||
ret void
|
||||
}
|
||||
|
||||
; CHECK-LABEL: @test_func_addrspacecast_cost_nonfree(
|
||||
; CHECK: br i1 %exitcond
|
||||
define amdgpu_kernel void @test_func_addrspacecast_cost_nonfree(float addrspace(3)* noalias nocapture %out, float addrspace(3)* noalias nocapture %in) #0 {
|
||||
entry:
|
||||
br label %for.body
|
||||
|
||||
for.body:
|
||||
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
|
||||
%sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
|
||||
%arrayidx.in = getelementptr inbounds float, float addrspace(3)* %in, i32 %indvars.iv
|
||||
%arrayidx.out = getelementptr inbounds float, float addrspace(3)* %out, i32 %indvars.iv
|
||||
%cast.in = addrspacecast float addrspace(3)* %arrayidx.in to float*
|
||||
%cast.out = addrspacecast float addrspace(3)* %arrayidx.out to float*
|
||||
%load = load float, float* %cast.in
|
||||
%fmul = fmul float %load, %sum.02
|
||||
store float %fmul, float* %cast.out
|
||||
%indvars.iv.next = add i32 %indvars.iv, 1
|
||||
%exitcond = icmp eq i32 %indvars.iv.next, 16
|
||||
br i1 %exitcond, label %for.end, label %for.body
|
||||
|
||||
for.end:
|
||||
ret void
|
||||
}
|
||||
|
||||
attributes #0 = { nounwind }
|
||||
attributes #1 = { nounwind readnone speculatable }
|
Loading…
Reference in New Issue