AMDGPU/SI: Limit load clustering to 16 bytes instead of 4 instructions
Summary: This helps prevent load clustering from drastically increasing register pressure by trying to cluster 4 SMRDx8 loads together. The limit of 16 bytes was chosen, because it seems like that was the original intent of setting the limit to 4 instructions, but more analysis could show that a different limit is better. This yields small decreases in register usage with shader-db, but also helps avoid a large increase in register usage when lane mask tracking is enabled in the machine scheduler, because lane mask tracking enables more opportunities for load clustering. shader-db stats: 2379 shaders in 477 tests Totals: SGPRS: 49744 -> 48600 (-2.30 %) VGPRS: 34120 -> 34076 (-0.13 %) Code Size: 1282888 -> 1283184 (0.02 %) bytes LDS: 28 -> 28 (0.00 %) blocks Scratch: 495616 -> 492544 (-0.62 %) bytes per wave Max Waves: 6843 -> 6853 (0.15 %) Wait states: 0 -> 0 (0.00 %) Reviewers: nhaehnle, arsenm Subscribers: arsenm, llvm-commits Differential Revision: http://reviews.llvm.org/D18451 llvm-svn: 264589
This commit is contained in:
parent
6db1dcbf6b
commit
a76bcc2ea1
|
@ -295,18 +295,43 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
|
||||||
bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
|
bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
|
||||||
MachineInstr *SecondLdSt,
|
MachineInstr *SecondLdSt,
|
||||||
unsigned NumLoads) const {
|
unsigned NumLoads) const {
|
||||||
// TODO: This needs finer tuning
|
const MachineOperand *FirstDst = nullptr;
|
||||||
if (NumLoads > 4)
|
const MachineOperand *SecondDst = nullptr;
|
||||||
|
|
||||||
|
if (isDS(*FirstLdSt) && isDS(*SecondLdSt)) {
|
||||||
|
FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::vdst);
|
||||||
|
SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::vdst);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (isSMRD(*FirstLdSt) && isSMRD(*FirstLdSt)) {
|
||||||
|
FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::sdst);
|
||||||
|
SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::sdst);
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((isMUBUF(*FirstLdSt) && isMUBUF(*SecondLdSt)) ||
|
||||||
|
(isMTBUF(*FirstLdSt) && isMTBUF(*SecondLdSt))) {
|
||||||
|
FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::vdata);
|
||||||
|
SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::vdata);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!FirstDst || !SecondDst)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (isDS(*FirstLdSt) && isDS(*SecondLdSt))
|
// Try to limit clustering based on the total number of bytes loaded
|
||||||
return true;
|
// rather than the number of instructions. This is done to help reduce
|
||||||
|
// register pressure. The method used is somewhat inexact, though,
|
||||||
|
// because it assumes that all loads in the cluster will load the
|
||||||
|
// same number of bytes as FirstLdSt.
|
||||||
|
|
||||||
if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt))
|
// The unit of this value is bytes.
|
||||||
return true;
|
// FIXME: This needs finer tuning.
|
||||||
|
unsigned LoadClusterThreshold = 16;
|
||||||
|
|
||||||
return (isMUBUF(*FirstLdSt) || isMTBUF(*FirstLdSt)) &&
|
const MachineRegisterInfo &MRI =
|
||||||
(isMUBUF(*SecondLdSt) || isMTBUF(*SecondLdSt));
|
FirstLdSt->getParent()->getParent()->getRegInfo();
|
||||||
|
const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
|
||||||
|
|
||||||
|
return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
|
|
@ -60,7 +60,7 @@ define void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace
|
||||||
|
|
||||||
; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i32:
|
; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i32:
|
||||||
; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
|
; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
|
||||||
; GCN-NEXT: s_waitcnt
|
; GCN: s_waitcnt
|
||||||
; GCN-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
|
; GCN-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
|
||||||
; GCN-NEXT: buffer_store_dword [[RESULT]],
|
; GCN-NEXT: buffer_store_dword [[RESULT]],
|
||||||
; GCN: s_endpgm
|
; GCN: s_endpgm
|
||||||
|
|
|
@ -101,7 +101,7 @@ define void @madak_inline_imm_f32(float addrspace(1)* noalias %out, float addrsp
|
||||||
|
|
||||||
; We can't use an SGPR when forming madak
|
; We can't use an SGPR when forming madak
|
||||||
; GCN-LABEL: {{^}}s_v_madak_f32:
|
; GCN-LABEL: {{^}}s_v_madak_f32:
|
||||||
; GCN: s_load_dword [[SB:s[0-9]+]]
|
; GCN-DAG: s_load_dword [[SB:s[0-9]+]]
|
||||||
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
|
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
|
||||||
; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]]
|
; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]]
|
||||||
; GCN-NOT: v_madak_f32
|
; GCN-NOT: v_madak_f32
|
||||||
|
|
|
@ -2,17 +2,19 @@
|
||||||
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=VI --check-prefix=GCN %s
|
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=VI --check-prefix=GCN %s
|
||||||
|
|
||||||
; FUNC-LABEL: {{^}}cluster_arg_loads:
|
; FUNC-LABEL: {{^}}cluster_arg_loads:
|
||||||
|
; FIXME: Due to changes in the load clustering heuristics, we no longer
|
||||||
|
; cluster all argument loads together.
|
||||||
|
; SI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
|
||||||
|
; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe
|
||||||
; SI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x9
|
; SI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x9
|
||||||
; SI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
|
; SI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
|
||||||
; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
|
; VI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
|
||||||
; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe
|
; VI-NEXT: s_nop 0
|
||||||
|
; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
|
||||||
|
; VI-NEXT: s_nop 0
|
||||||
; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
|
; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
|
||||||
; VI-NEXT: s_nop 0
|
; VI-NEXT: s_nop 0
|
||||||
; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
|
; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
|
||||||
; VI-NEXT: s_nop 0
|
|
||||||
; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
|
|
||||||
; VI-NEXT: s_nop 0
|
|
||||||
; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
|
|
||||||
define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind {
|
define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind {
|
||||||
store i32 %x, i32 addrspace(1)* %out0, align 4
|
store i32 %x, i32 addrspace(1)* %out0, align 4
|
||||||
store i32 %y, i32 addrspace(1)* %out1, align 4
|
store i32 %y, i32 addrspace(1)* %out1, align 4
|
||||||
|
|
Loading…
Reference in New Issue