[AMDGPU] gfx10 conditional registers handling

This is the C++ source part of wave32 support, excluding the overridden
getRegClass().

Differential Revision: https://reviews.llvm.org/D63351

llvm-svn: 363513
Stanislav Mekhanoshin 2019-06-16 17:13:09 +00:00
parent e20b388e2f
commit 5250021672
19 changed files with 788 additions and 309 deletions
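For orientation (not part of the commit): the recurring pattern throughout this patch is to select the 32-bit or 64-bit scalar opcode and the matching EXEC/VCC register based on GCNSubtarget::isWave32(), and to size lane-mask virtual registers through the new getBoolRC()/getWaveMaskRegClass() hooks instead of hard-coding SReg_64. Below is a minimal C++ sketch of that idiom; it assumes the AMDGPU backend's internal headers, and the helper function names are illustrative only, not functions introduced by this change.

// Illustrative sketch, not part of the commit. Helper names are hypothetical;
// includes are approximate for the 2019 source tree layout.
#include "AMDGPUSubtarget.h"                  // GCNSubtarget
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

// Scalar AND opcode matching the wavefront size.
static unsigned getAndOpc(const GCNSubtarget &ST) {
  return ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
}

// Exec mask register matching the wavefront size.
static unsigned getExecReg(const GCNSubtarget &ST) {
  return ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
}

// Wave-size-agnostic lane-mask virtual register, using the boolean register
// class hook rather than a hard-coded &AMDGPU::SReg_64RegClass.
static unsigned createLaneMaskVReg(MachineRegisterInfo &MRI,
                                   const SIRegisterInfo &TRI) {
  return MRI.createVirtualRegister(TRI.getBoolRC());
}

In wave32 mode the condition/carry bits live in VCC_LO and the exec mask in EXEC_LO, which is why the patch also adds *_B32 terminator pseudos and a fixImplicitOperands() helper that retargets implicit VCC uses to VCC_LO.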


@ -1014,6 +1014,9 @@ void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
}
void AMDGPUDAGToDAGISel::SelectDIV_FMAS(SDNode *N) {
const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
const SIRegisterInfo *TRI = ST->getRegisterInfo();
SDLoc SL(N);
EVT VT = N->getValueType(0);
@ -1025,7 +1028,7 @@ void AMDGPUDAGToDAGISel::SelectDIV_FMAS(SDNode *N) {
SDValue CarryIn = N->getOperand(3);
// V_DIV_FMAS implicitly reads VCC.
SDValue VCC = CurDAG->getCopyToReg(CurDAG->getEntryNode(), SL,
AMDGPU::VCC, CarryIn, SDValue());
TRI->getVCC(), CarryIn, SDValue());
SDValue Ops[10];
@ -1842,9 +1845,12 @@ void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
return;
}
const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
const SIRegisterInfo *TRI = ST->getRegisterInfo();
bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
unsigned CondReg = UseSCCBr ? AMDGPU::SCC : AMDGPU::VCC;
unsigned CondReg = UseSCCBr ? (unsigned)AMDGPU::SCC : TRI->getVCC();
SDLoc SL(N);
if (!UseSCCBr) {
@ -1861,9 +1867,13 @@ void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
// the S_AND when is unnecessary. But it would be better to add a separate
// pass after SIFixSGPRCopies to do the unnecessary S_AND removal, so it
// catches both cases.
Cond = SDValue(CurDAG->getMachineNode(AMDGPU::S_AND_B64, SL, MVT::i1,
CurDAG->getRegister(AMDGPU::EXEC, MVT::i1),
Cond),
Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
: AMDGPU::S_AND_B64,
SL, MVT::i1,
CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
: AMDGPU::EXEC,
MVT::i1),
Cond),
0);
}


@ -1042,6 +1042,10 @@ public:
return WavefrontSize == 32;
}
const TargetRegisterClass *getBoolRC() const {
return getRegisterInfo()->getBoolRC();
}
/// \returns Maximum number of work groups per compute unit supported by the
/// subtarget and limited by given \p FlatWorkGroupSize.
unsigned getMaxWorkGroupsPerCU(unsigned FlatWorkGroupSize) const override {


@ -627,9 +627,11 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
ScratchExecCopy
= findScratchNonCalleeSaveRegister(MF, LiveRegs,
AMDGPU::SReg_64_XEXECRegClass);
*TRI.getWaveMaskRegClass());
BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64),
const unsigned OrSaveExec = ST.isWave32() ?
AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec),
ScratchExecCopy)
.addImm(-1);
}
@ -641,7 +643,9 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
if (ScratchExecCopy != AMDGPU::NoRegister) {
// FIXME: Split block and make terminator.
BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
.addReg(ScratchExecCopy);
}
}
@ -663,6 +667,7 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
if (!Reg.FI.hasValue())
continue;
const SIRegisterInfo &TRI = TII->getRegisterInfo();
if (ScratchExecCopy == AMDGPU::NoRegister) {
// See emitPrologue
LivePhysRegs LiveRegs(*ST.getRegisterInfo());
@ -670,9 +675,12 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
ScratchExecCopy
= findScratchNonCalleeSaveRegister(MF, LiveRegs,
AMDGPU::SReg_64_XEXECRegClass);
*TRI.getWaveMaskRegClass());
BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), ScratchExecCopy)
const unsigned OrSaveExec = ST.isWave32() ?
AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy)
.addImm(-1);
}
@ -683,7 +691,9 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
if (ScratchExecCopy != AMDGPU::NoRegister) {
// FIXME: Split block and make terminator.
BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
.addReg(ScratchExecCopy);
}


@ -2924,12 +2924,16 @@ static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
int Offset,
bool UseGPRIdxMode,
bool IsIndirectSrc) {
MachineFunction *MF = OrigBB.getParent();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
MachineBasicBlock::iterator I = LoopBB.begin();
unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
const TargetRegisterClass *BoolRC = TRI->getBoolRC();
unsigned PhiExec = MRI.createVirtualRegister(BoolRC);
unsigned NewExec = MRI.createVirtualRegister(BoolRC);
unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned CondReg = MRI.createVirtualRegister(BoolRC);
BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
.addReg(InitReg)
@ -2953,7 +2957,9 @@ static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
.addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
// Update EXEC, save the original EXEC value to VCC.
BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
: AMDGPU::S_AND_SAVEEXEC_B64),
NewExec)
.addReg(CondReg, RegState::Kill);
MRI.setSimpleHint(NewExec, CondReg);
@ -2988,10 +2994,12 @@ static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
}
// Update EXEC, switch all done bits to 0 and all todo bits to 1.
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
MachineInstr *InsertPt =
BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC)
.addReg(NewExec);
BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
: AMDGPU::S_XOR_B64_term), Exec)
.addReg(Exec)
.addReg(NewExec);
// XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
// s_cbranch_scc0?
@ -3017,19 +3025,24 @@ static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
bool UseGPRIdxMode,
bool IsIndirectSrc) {
MachineFunction *MF = MBB.getParent();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
const DebugLoc &DL = MI.getDebugLoc();
MachineBasicBlock::iterator I(&MI);
const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
unsigned TmpExec = MRI.createVirtualRegister(BoolXExecRC);
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
// Save the EXEC mask
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
.addReg(AMDGPU::EXEC);
BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
.addReg(Exec);
// To insert the loop we need to split the block. Move everything after this
// point to a new block, and insert a new empty block between the two.
@ -3057,7 +3070,7 @@ static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
Offset, UseGPRIdxMode, IsIndirectSrc);
MachineBasicBlock::iterator First = RemainderBB->begin();
BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec)
.addReg(SaveExec);
return InsPt;
@ -3349,6 +3362,9 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_U64_PSEUDO: {
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const TargetRegisterClass *BoolRC = TRI->getBoolRC();
const DebugLoc &DL = MI.getDebugLoc();
MachineOperand &Dest = MI.getOperand(0);
@ -3359,17 +3375,17 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
Src0, BoolRC, AMDGPU::sub0,
&AMDGPU::SReg_32_XM0RegClass);
MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
Src0, BoolRC, AMDGPU::sub1,
&AMDGPU::SReg_32_XM0RegClass);
MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
Src1, BoolRC, AMDGPU::sub0,
&AMDGPU::SReg_32_XM0RegClass);
MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
Src1, BoolRC, AMDGPU::sub1,
&AMDGPU::SReg_32_XM0RegClass);
bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
@ -3405,6 +3421,14 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
MI.eraseFromParent();
return BB;
case AMDGPU::SI_INIT_EXEC_LO:
// This should be before all vector instructions.
BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
AMDGPU::EXEC_LO)
.addImm(MI.getOperand(0).getImm());
MI.eraseFromParent();
return BB;
case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
// Extract the thread count from an SGPR input and set EXEC accordingly.
// Since BFM can't shift by 64, handle that case with CMP + CMOV.
@ -3438,18 +3462,23 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
(void)Found;
// This should be before all vector instructions.
unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1;
bool isWave32 = getSubtarget()->isWave32();
unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
.addReg(InputReg)
.addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
AMDGPU::EXEC)
.addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
BuildMI(*BB, FirstMI, DebugLoc(),
TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64),
Exec)
.addReg(CountReg)
.addImm(0);
BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
.addReg(CountReg, RegState::Kill)
.addImm(64);
BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
AMDGPU::EXEC)
.addImm(getSubtarget()->getWavefrontSize());
BuildMI(*BB, FirstMI, DebugLoc(),
TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
Exec)
.addImm(-1);
MI.eraseFromParent();
return BB;
@ -3480,6 +3509,8 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
return splitKillBlock(MI, BB);
case AMDGPU::V_CNDMASK_B64_PSEUDO: {
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
unsigned Dst = MI.getOperand(0).getReg();
unsigned Src0 = MI.getOperand(1).getReg();
@ -3489,7 +3520,8 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned SrcCondCopy = MRI.createVirtualRegister(CondRC);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
.addReg(SrcCond);
@ -3567,7 +3599,9 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
if (TII->isVOP3(*I)) {
I.addReg(AMDGPU::VCC, RegState::Define);
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
I.addReg(TRI->getVCC(), RegState::Define);
}
I.add(MI.getOperand(1))
.add(MI.getOperand(2));
@ -10003,6 +10037,7 @@ SITargetLowering::getConstraintType(StringRef Constraint) const {
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
MachineRegisterInfo &MRI = MF.getRegInfo();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
if (Info->isEntryFunction()) {
@ -10030,6 +10065,25 @@ void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
Info->limitOccupancy(MF);
if (ST.isWave32() && !MF.empty()) {
// Add VCC_HI def because many instructions marked as imp-use VCC where
// we may only define VCC_LO. If nothing defines VCC_HI we may end up
// having a use of undef.
const SIInstrInfo *TII = ST.getInstrInfo();
DebugLoc DL;
MachineBasicBlock &MBB = MF.front();
MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr();
BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI);
for (auto &MBB : MF) {
for (auto &MI : MBB) {
TII->fixImplicitOperands(MI);
}
}
}
TargetLoweringBase::finalizeLowering(MF);
}


@ -271,6 +271,9 @@ void SIInsertSkips::kill(MachineInstr &MI) {
break;
}
case AMDGPU::SI_KILL_I1_TERMINATOR: {
const MachineFunction *MF = MI.getParent()->getParent();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
const MachineOperand &Op = MI.getOperand(0);
int64_t KillVal = MI.getOperand(1).getImm();
assert(KillVal == 0 || KillVal == -1);
@ -281,14 +284,17 @@ void SIInsertSkips::kill(MachineInstr &MI) {
assert(Imm == 0 || Imm == -1);
if (Imm == KillVal)
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
BuildMI(MBB, &MI, DL, TII->get(ST.isWave32() ? AMDGPU::S_MOV_B32
: AMDGPU::S_MOV_B64), Exec)
.addImm(0);
break;
}
unsigned Opcode = KillVal ? AMDGPU::S_ANDN2_B64 : AMDGPU::S_AND_B64;
BuildMI(MBB, &MI, DL, TII->get(Opcode), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC)
if (ST.isWave32())
Opcode = KillVal ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_AND_B32;
BuildMI(MBB, &MI, DL, TII->get(Opcode), Exec)
.addReg(Exec)
.add(Op);
break;
}
@ -337,9 +343,11 @@ bool SIInsertSkips::optimizeVccBranch(MachineInstr &MI) const {
// S_CBRANCH_EXEC[N]Z
bool Changed = false;
MachineBasicBlock &MBB = *MI.getParent();
const unsigned CondReg = AMDGPU::VCC;
const unsigned ExecReg = AMDGPU::EXEC;
const unsigned And = AMDGPU::S_AND_B64;
const GCNSubtarget &ST = MBB.getParent()->getSubtarget<GCNSubtarget>();
const bool IsWave32 = ST.isWave32();
const unsigned CondReg = TRI->getVCC();
const unsigned ExecReg = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
const unsigned And = IsWave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
MachineBasicBlock::reverse_iterator A = MI.getReverseIterator(),
E = MBB.rend();


@ -1426,9 +1426,9 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
// bit is updated, so we can restore the bit by reading the value of
// vcc and then writing it back to the register.
BuildMI(Block, Inst, Inst.getDebugLoc(),
TII->get(AMDGPU::S_MOV_B64),
AMDGPU::VCC)
.addReg(AMDGPU::VCC);
TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
TRI->getVCC())
.addReg(TRI->getVCC());
VCCZBugHandledSet.insert(&Inst);
Modified = true;
}


@ -527,6 +527,21 @@ void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
return;
}
if (DestReg == AMDGPU::VCC_LO) {
if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
.addReg(SrcReg, getKillRegState(KillSrc));
} else {
// FIXME: Hack until VReg_1 removed.
assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
.addImm(0)
.addReg(SrcReg, getKillRegState(KillSrc));
}
return;
}
if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
return;
@ -698,11 +713,15 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
unsigned TrueReg,
unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineFunction *MF = MBB.getParent();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const TargetRegisterClass *BoolXExecRC =
RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
"Not a VGPR32 reg");
if (Cond.size() == 1) {
unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
.add(Cond[0]);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -715,8 +734,9 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
assert(Cond[0].isImm() && "Cond[0] is not an immediate");
switch (Cond[0].getImm()) {
case SIInstrInfo::SCC_TRUE: {
unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
: AMDGPU::S_CSELECT_B64), SReg)
.addImm(-1)
.addImm(0);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -728,8 +748,9 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
break;
}
case SIInstrInfo::SCC_FALSE: {
unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
: AMDGPU::S_CSELECT_B64), SReg)
.addImm(0)
.addImm(-1);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -743,7 +764,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
case SIInstrInfo::VCCNZ: {
MachineOperand RegOp = Cond[1];
RegOp.setImplicit(false);
unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
.add(RegOp);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -757,7 +778,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
case SIInstrInfo::VCCZ: {
MachineOperand RegOp = Cond[1];
RegOp.setImplicit(false);
unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
.add(RegOp);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -769,11 +790,13 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
break;
}
case SIInstrInfo::EXECNZ: {
unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
unsigned SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
: AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
.addImm(0);
BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
: AMDGPU::S_CSELECT_B64), SReg)
.addImm(-1)
.addImm(0);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -785,11 +808,13 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
break;
}
case SIInstrInfo::EXECZ: {
unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
unsigned SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
: AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
.addImm(0);
BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
: AMDGPU::S_CSELECT_B64), SReg)
.addImm(0)
.addImm(-1);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -814,7 +839,7 @@ unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
const DebugLoc &DL,
unsigned SrcReg, int Value) const {
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned Reg = MRI.createVirtualRegister(RI.getBoolRC());
BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
.addImm(Value)
.addReg(SrcReg);
@ -827,7 +852,7 @@ unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB,
const DebugLoc &DL,
unsigned SrcReg, int Value) const {
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned Reg = MRI.createVirtualRegister(RI.getBoolRC());
BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
.addImm(Value)
.addReg(SrcReg);
@ -1208,18 +1233,42 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
MI.setDesc(get(AMDGPU::S_MOV_B64));
break;
case AMDGPU::S_MOV_B32_term:
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(get(AMDGPU::S_MOV_B32));
break;
case AMDGPU::S_XOR_B64_term:
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(get(AMDGPU::S_XOR_B64));
break;
case AMDGPU::S_XOR_B32_term:
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(get(AMDGPU::S_XOR_B32));
break;
case AMDGPU::S_OR_B32_term:
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(get(AMDGPU::S_OR_B32));
break;
case AMDGPU::S_ANDN2_B64_term:
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(get(AMDGPU::S_ANDN2_B64));
break;
case AMDGPU::S_ANDN2_B32_term:
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(get(AMDGPU::S_ANDN2_B32));
break;
case AMDGPU::V_MOV_B64_PSEUDO: {
unsigned Dst = MI.getOperand(0).getReg();
unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
@ -1249,24 +1298,28 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
break;
}
case AMDGPU::V_SET_INACTIVE_B32: {
BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC);
unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
BuildMI(MBB, MI, DL, get(NotOpc), Exec)
.addReg(Exec);
BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
.add(MI.getOperand(2));
BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC);
BuildMI(MBB, MI, DL, get(NotOpc), Exec)
.addReg(Exec);
MI.eraseFromParent();
break;
}
case AMDGPU::V_SET_INACTIVE_B64: {
BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC);
unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
BuildMI(MBB, MI, DL, get(NotOpc), Exec)
.addReg(Exec);
MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
MI.getOperand(0).getReg())
.add(MI.getOperand(2));
expandPostRAPseudo(*Copy);
BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC);
BuildMI(MBB, MI, DL, get(NotOpc), Exec)
.addReg(Exec);
MI.eraseFromParent();
break;
}
@ -1330,13 +1383,14 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
case AMDGPU::ENTER_WWM: {
// This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
// WWM is entered.
MI.setDesc(get(AMDGPU::S_OR_SAVEEXEC_B64));
MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
: AMDGPU::S_OR_SAVEEXEC_B64));
break;
}
case AMDGPU::EXIT_WWM: {
// This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
// WWM is exited.
MI.setDesc(get(AMDGPU::S_MOV_B64));
MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
break;
}
case TargetOpcode::BUNDLE: {
@ -1699,6 +1753,10 @@ bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
case AMDGPU::S_MOV_B64_term:
case AMDGPU::S_XOR_B64_term:
case AMDGPU::S_ANDN2_B64_term:
case AMDGPU::S_MOV_B32_term:
case AMDGPU::S_XOR_B32_term:
case AMDGPU::S_OR_B32_term:
case AMDGPU::S_ANDN2_B32_term:
break;
case AMDGPU::SI_IF:
case AMDGPU::SI_ELSE:
@ -1978,6 +2036,7 @@ void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
.addReg(FalseReg, 0, SubIdx)
.addReg(TrueReg, 0, SubIdx);
preserveCondRegFlags(Select->getOperand(3), Cond[1]);
fixImplicitOperands(*Select);
MIB.addReg(DstElt)
.addImm(SubIdx);
@ -2782,7 +2841,8 @@ MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
// dst
Inst32.add(MI.getOperand(0));
} else {
assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) ||
(MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) &&
"Unexpected case");
}
@ -2850,6 +2910,8 @@ static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
switch (MO.getReg()) {
case AMDGPU::VCC:
case AMDGPU::VCC_LO:
case AMDGPU::VCC_HI:
case AMDGPU::M0:
case AMDGPU::FLAT_SCR:
return MO.getReg();
@ -3795,6 +3857,7 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
Src1.setSubReg(Src0SubReg);
fixImplicitOperands(MI);
}
// Legalize VOP3 operands. All operand types are supported for any operand
@ -3971,15 +4034,27 @@ static void
emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
const DebugLoc &DL, MachineOperand &Rsrc) {
MachineFunction &MF = *OrigBB.getParent();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
unsigned SaveExecOpc =
ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
unsigned XorTermOpc =
ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
unsigned AndOpc =
ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
MachineBasicBlock::iterator I = LoopBB.begin();
unsigned VRsrc = Rsrc.getReg();
unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned CondReg0 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned CondReg1 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned AndCond = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
unsigned CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
unsigned CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
unsigned AndCond = MRI.createVirtualRegister(BoolXExecRC);
unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
@ -4017,22 +4092,22 @@ emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1)
.addReg(SRsrc, 0, AMDGPU::sub2_sub3)
.addReg(VRsrc, 0, AMDGPU::sub2_sub3);
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_B64), AndCond)
BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndCond)
.addReg(CondReg0)
.addReg(CondReg1);
MRI.setSimpleHint(SaveExec, AndCond);
// Update EXEC to matching lanes, saving original to SaveExec.
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_SAVEEXEC_B64), SaveExec)
BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec)
.addReg(AndCond, RegState::Kill);
// The original instruction is here; we insert the terminators after it.
I = LoopBB.end();
// Update EXEC, switch all done bits to 0 and all todo bits to 1.
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC)
BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec)
.addReg(Exec)
.addReg(SaveExec);
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB);
}
@ -4043,15 +4118,19 @@ static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
MachineOperand &Rsrc, MachineDominatorTree *MDT) {
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
MachineBasicBlock::iterator I(&MI);
const DebugLoc &DL = MI.getDebugLoc();
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
// Save the EXEC mask
BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B64), SaveExec)
.addReg(AMDGPU::EXEC);
BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);
// Killed uses in the instruction we are waterfalling around will be
// incorrect due to the added control-flow.
@ -4100,8 +4179,7 @@ static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
// Restore the EXEC mask
MachineBasicBlock::iterator First = RemainderBB->begin();
BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
.addReg(SaveExec);
BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
}
// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
@ -4332,14 +4410,16 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
// NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
DebugLoc DL = MI.getDebugLoc();
BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
fixImplicitOperands(*
BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
.addReg(RsrcPtr, 0, AMDGPU::sub0)
.addReg(VAddr->getReg(), 0, AMDGPU::sub0);
.addReg(VAddr->getReg(), 0, AMDGPU::sub0));
// NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
fixImplicitOperands(*
BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
.addReg(RsrcPtr, 0, AMDGPU::sub1)
.addReg(VAddr->getReg(), 0, AMDGPU::sub1);
.addReg(VAddr->getReg(), 0, AMDGPU::sub1));
// NewVaddr = {NewVaddrHi, NewVaddrLo}
BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
@ -4563,10 +4643,16 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
case AMDGPU::S_CBRANCH_SCC0:
case AMDGPU::S_CBRANCH_SCC1:
// Clear unused bits of vcc
BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
AMDGPU::VCC)
.addReg(AMDGPU::EXEC)
.addReg(AMDGPU::VCC);
if (ST.isWave32())
BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32),
AMDGPU::VCC_LO)
.addReg(AMDGPU::EXEC_LO)
.addReg(AMDGPU::VCC_LO);
else
BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
AMDGPU::VCC)
.addReg(AMDGPU::EXEC)
.addReg(AMDGPU::VCC);
break;
case AMDGPU::S_BFE_U64:
@ -4644,6 +4730,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
}
Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
fixImplicitOperands(Inst);
if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
@ -4957,13 +5044,14 @@ void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist,
MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned CarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned DeadCarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned CarryReg = MRI.createVirtualRegister(CarryRC);
unsigned DeadCarryReg = MRI.createVirtualRegister(CarryRC);
MachineOperand &Dest = Inst.getOperand(0);
MachineOperand &Src0 = Inst.getOperand(1);
@ -5661,7 +5749,7 @@ void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();
if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned DstReg = MRI.createVirtualRegister(RI.getBoolRC());
MachineInstr *SIIF =
BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
.add(Branch->getOperand(0))
@ -5688,8 +5776,8 @@ void SIInstrInfo::convertNonUniformLoopRegion(
if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned BackEdgeReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned DstReg = MRI.createVirtualRegister(RI.getBoolRC());
unsigned BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
MachineInstrBuilder HeaderPHIBuilder =
BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
@ -5699,7 +5787,7 @@ void SIInstrInfo::convertNonUniformLoopRegion(
HeaderPHIBuilder.addReg(BackEdgeReg);
} else {
MachineBasicBlock *PMBB = *PI;
unsigned ZeroReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
ZeroReg, 0);
HeaderPHIBuilder.addReg(ZeroReg);
@ -5781,8 +5869,8 @@ SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
MRI.setRegAllocationHint(UnusedCarry, 0, AMDGPU::VCC);
unsigned UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());
return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg)
.addReg(UnusedCarry, RegState::Define | RegState::Dead);
@ -5809,6 +5897,20 @@ const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) con
}
}
void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const {
MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
if (!ST.isWave32())
return;
for (auto &Op : MI.implicit_operands()) {
if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
Op.setReg(AMDGPU::VCC_LO);
}
}
bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
if (!isSMRD(MI))
return false;


@ -942,6 +942,8 @@ public:
/// Return -1 if the target-specific opcode for the pseudo instruction does
/// not exist. If Opcode is not a pseudo instruction, this is identity.
int pseudoToMCOpcode(int Opcode) const;
void fixImplicitOperands(MachineInstr &MI) const;
};
/// \brief Returns true if a reg:subreg pair P has a TRC class


@ -1144,9 +1144,10 @@ unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
MachineOperand OffsetHi =
createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
unsigned CarryReg = MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
unsigned DeadCarryReg =
MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned CarryReg = MRI->createVirtualRegister(CarryRC);
unsigned DeadCarryReg = MRI->createVirtualRegister(CarryRC);
unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);


@ -82,6 +82,16 @@ private:
LiveIntervals *LIS = nullptr;
MachineRegisterInfo *MRI = nullptr;
const TargetRegisterClass *BoolRC = nullptr;
unsigned AndOpc;
unsigned OrOpc;
unsigned XorOpc;
unsigned MovTermOpc;
unsigned Andn2TermOpc;
unsigned XorTermrOpc;
unsigned OrSaveExecOpc;
unsigned Exec;
void emitIf(MachineInstr &MI);
void emitElse(MachineInstr &MI);
void emitIfBreak(MachineInstr &MI);
@ -188,16 +198,16 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
// Add an implicit def of exec to discourage scheduling VALU after this which
// will interfere with trying to form s_and_saveexec_b64 later.
unsigned CopyReg = SimpleIf ? SaveExecReg
: MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
: MRI->createVirtualRegister(BoolRC);
MachineInstr *CopyExec =
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
.addReg(AMDGPU::EXEC)
.addReg(AMDGPU::EXEC, RegState::ImplicitDefine);
.addReg(Exec)
.addReg(Exec, RegState::ImplicitDefine);
unsigned Tmp = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned Tmp = MRI->createVirtualRegister(BoolRC);
MachineInstr *And =
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), Tmp)
BuildMI(MBB, I, DL, TII->get(AndOpc), Tmp)
.addReg(CopyReg)
.add(Cond);
@ -206,7 +216,7 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
MachineInstr *Xor = nullptr;
if (!SimpleIf) {
Xor =
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), SaveExecReg)
BuildMI(MBB, I, DL, TII->get(XorOpc), SaveExecReg)
.addReg(Tmp)
.addReg(CopyReg);
setImpSCCDefDead(*Xor, ImpDefSCC.isDead());
@ -215,7 +225,7 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
// Use a copy that is a terminator to get correct spill code placement it with
// fast regalloc.
MachineInstr *SetExec =
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64_term), AMDGPU::EXEC)
BuildMI(MBB, I, DL, TII->get(MovTermOpc), Exec)
.addReg(Tmp, RegState::Kill);
// Insert a pseudo terminator to help keep the verifier happy. This will also
@ -265,7 +275,7 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
// We are running before TwoAddressInstructions, and si_else's operands are
// tied. In order to correctly tie the registers, split this into a copy of
// the src like it does.
unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned CopyReg = MRI->createVirtualRegister(BoolRC);
MachineInstr *CopyExec =
BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
.add(MI.getOperand(1)); // Saved EXEC
@ -273,9 +283,9 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
// This must be inserted before phis and any spill code inserted before the
// else.
unsigned SaveReg = ExecModified ?
MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass) : DstReg;
MRI->createVirtualRegister(BoolRC) : DstReg;
MachineInstr *OrSaveExec =
BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), SaveReg)
BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg)
.addReg(CopyReg);
MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();
@ -284,8 +294,8 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
if (ExecModified) {
MachineInstr *And =
BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_AND_B64), DstReg)
.addReg(AMDGPU::EXEC)
BuildMI(MBB, ElsePt, DL, TII->get(AndOpc), DstReg)
.addReg(Exec)
.addReg(SaveReg);
if (LIS)
@ -293,8 +303,8 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
}
MachineInstr *Xor =
BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC)
BuildMI(MBB, ElsePt, DL, TII->get(XorTermrOpc), Exec)
.addReg(Exec)
.addReg(DstReg);
MachineInstr *Branch =
@ -347,14 +357,14 @@ void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
// exit" mask.
MachineInstr *And = nullptr, *Or = nullptr;
if (!SkipAnding) {
And = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
.addReg(AMDGPU::EXEC)
And = BuildMI(MBB, &MI, DL, TII->get(AndOpc), Dst)
.addReg(Exec)
.add(MI.getOperand(1));
Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
.addReg(Dst)
.add(MI.getOperand(2));
} else
Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
.add(MI.getOperand(1))
.add(MI.getOperand(2));
@ -372,8 +382,8 @@ void SILowerControlFlow::emitLoop(MachineInstr &MI) {
const DebugLoc &DL = MI.getDebugLoc();
MachineInstr *AndN2 =
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC)
BuildMI(MBB, &MI, DL, TII->get(Andn2TermOpc), Exec)
.addReg(Exec)
.add(MI.getOperand(0));
MachineInstr *Branch =
@ -394,8 +404,8 @@ void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
MachineBasicBlock::iterator InsPt = MBB.begin();
MachineInstr *NewMI =
BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC)
BuildMI(MBB, InsPt, DL, TII->get(OrOpc), Exec)
.addReg(Exec)
.add(MI.getOperand(0));
if (LIS)
@ -427,13 +437,13 @@ void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
// does not really modify exec.
for (auto I = Def->getIterator(); I != MI.getIterator(); ++I)
if (I->modifiesRegister(AMDGPU::EXEC, TRI) &&
!(I->isCopy() && I->getOperand(0).getReg() != AMDGPU::EXEC))
!(I->isCopy() && I->getOperand(0).getReg() != Exec))
return;
for (const auto &SrcOp : Def->explicit_operands())
if (SrcOp.isReg() && SrcOp.isUse() &&
(TargetRegisterInfo::isVirtualRegister(SrcOp.getReg()) ||
SrcOp.getReg() == AMDGPU::EXEC))
SrcOp.getReg() == Exec))
Src.push_back(SrcOp);
}
@ -471,6 +481,27 @@ bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
// This doesn't actually need LiveIntervals, but we can preserve them.
LIS = getAnalysisIfAvailable<LiveIntervals>();
MRI = &MF.getRegInfo();
BoolRC = TRI->getBoolRC();
if (ST.isWave32()) {
AndOpc = AMDGPU::S_AND_B32;
OrOpc = AMDGPU::S_OR_B32;
XorOpc = AMDGPU::S_XOR_B32;
MovTermOpc = AMDGPU::S_MOV_B32_term;
Andn2TermOpc = AMDGPU::S_ANDN2_B32_term;
XorTermrOpc = AMDGPU::S_XOR_B32_term;
OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B32;
Exec = AMDGPU::EXEC_LO;
} else {
AndOpc = AMDGPU::S_AND_B64;
OrOpc = AMDGPU::S_OR_B64;
XorOpc = AMDGPU::S_XOR_B64;
MovTermOpc = AMDGPU::S_MOV_B64_term;
Andn2TermOpc = AMDGPU::S_ANDN2_B64_term;
XorTermrOpc = AMDGPU::S_XOR_B64_term;
OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B64;
Exec = AMDGPU::EXEC;
}
MachineFunction::iterator NextBB;
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
@ -507,6 +538,8 @@ bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::S_AND_B64:
case AMDGPU::S_OR_B64:
case AMDGPU::S_AND_B32:
case AMDGPU::S_OR_B32:
// Cleanup bit manipulations on exec mask
combineMasks(MI);
Last = I;


@ -7,8 +7,8 @@
//===----------------------------------------------------------------------===//
//
// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
// to lane masks (64-bit scalar registers). The pass assumes machine SSA form
// and a wave-level control flow graph.
// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
// form and a wave-level control flow graph.
//
// Before this pass, values that are semantically i1 and are defined and used
// within the same basic block are already represented as lane masks in scalar
@ -50,6 +50,7 @@ public:
static char ID;
private:
bool IsWave32 = false;
MachineFunction *MF = nullptr;
MachineDominatorTree *DT = nullptr;
MachinePostDominatorTree *PDT = nullptr;
@ -57,6 +58,14 @@ private:
const GCNSubtarget *ST = nullptr;
const SIInstrInfo *TII = nullptr;
unsigned ExecReg;
unsigned MovOp;
unsigned AndOp;
unsigned OrOp;
unsigned XorOp;
unsigned AndN2Op;
unsigned OrN2Op;
DenseSet<unsigned> ConstrainRegs;
public:
@ -411,8 +420,10 @@ FunctionPass *llvm::createSILowerI1CopiesPass() {
}
static unsigned createLaneMaskReg(MachineFunction &MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
MachineRegisterInfo &MRI = MF.getRegInfo();
return MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
return MRI.createVirtualRegister(ST.isWave32() ? &AMDGPU::SReg_32RegClass
: &AMDGPU::SReg_64RegClass);
}
static unsigned insertUndefLaneMask(MachineBasicBlock &MBB) {
@ -442,13 +453,32 @@ bool SILowerI1Copies::runOnMachineFunction(MachineFunction &TheMF) {
ST = &MF->getSubtarget<GCNSubtarget>();
TII = ST->getInstrInfo();
IsWave32 = ST->isWave32();
if (IsWave32) {
ExecReg = AMDGPU::EXEC_LO;
MovOp = AMDGPU::S_MOV_B32;
AndOp = AMDGPU::S_AND_B32;
OrOp = AMDGPU::S_OR_B32;
XorOp = AMDGPU::S_XOR_B32;
AndN2Op = AMDGPU::S_ANDN2_B32;
OrN2Op = AMDGPU::S_ORN2_B32;
} else {
ExecReg = AMDGPU::EXEC;
MovOp = AMDGPU::S_MOV_B64;
AndOp = AMDGPU::S_AND_B64;
OrOp = AMDGPU::S_OR_B64;
XorOp = AMDGPU::S_XOR_B64;
AndN2Op = AMDGPU::S_ANDN2_B64;
OrN2Op = AMDGPU::S_ORN2_B64;
}
lowerCopiesFromI1();
lowerPhis();
lowerCopiesToI1();
for (unsigned Reg : ConstrainRegs)
MRI->constrainRegClass(Reg, &AMDGPU::SReg_64_XEXECRegClass);
MRI->constrainRegClass(Reg, &AMDGPU::SReg_1_XEXECRegClass);
ConstrainRegs.clear();
return true;
@ -518,7 +548,8 @@ void SILowerI1Copies::lowerPhis() {
LLVM_DEBUG(dbgs() << "Lower PHI: " << MI);
MRI->setRegClass(DstReg, &AMDGPU::SReg_64RegClass);
MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
: &AMDGPU::SReg_64RegClass);
// Collect incoming values.
for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
@ -648,7 +679,8 @@ void SILowerI1Copies::lowerCopiesToI1() {
LLVM_DEBUG(dbgs() << "Lower Other: " << MI);
MRI->setRegClass(DstReg, &AMDGPU::SReg_64RegClass);
MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
: &AMDGPU::SReg_64RegClass);
if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
continue;
@ -707,7 +739,7 @@ bool SILowerI1Copies::isConstantLaneMask(unsigned Reg, bool &Val) const {
return false;
}
if (MI->getOpcode() != AMDGPU::S_MOV_B64)
if (MI->getOpcode() != MovOp)
return false;
if (!MI->getOperand(1).isImm())
@ -782,10 +814,10 @@ void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
if (PrevVal == CurVal) {
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
} else if (CurVal) {
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(AMDGPU::EXEC);
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);
} else {
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), DstReg)
.addReg(AMDGPU::EXEC)
BuildMI(MBB, I, DL, TII->get(XorOp), DstReg)
.addReg(ExecReg)
.addImm(-1);
}
return;
@ -798,9 +830,9 @@ void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
PrevMaskedReg = PrevReg;
} else {
PrevMaskedReg = createLaneMaskReg(*MF);
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ANDN2_B64), PrevMaskedReg)
BuildMI(MBB, I, DL, TII->get(AndN2Op), PrevMaskedReg)
.addReg(PrevReg)
.addReg(AMDGPU::EXEC);
.addReg(ExecReg);
}
}
if (!CurConstant) {
@ -809,9 +841,9 @@ void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
CurMaskedReg = CurReg;
} else {
CurMaskedReg = createLaneMaskReg(*MF);
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), CurMaskedReg)
BuildMI(MBB, I, DL, TII->get(AndOp), CurMaskedReg)
.addReg(CurReg)
.addReg(AMDGPU::EXEC);
.addReg(ExecReg);
}
}
@ -822,12 +854,12 @@ void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
.addReg(PrevMaskedReg);
} else if (PrevConstant && PrevVal) {
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ORN2_B64), DstReg)
BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg)
.addReg(CurMaskedReg)
.addReg(AMDGPU::EXEC);
.addReg(ExecReg);
} else {
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_OR_B64), DstReg)
BuildMI(MBB, I, DL, TII->get(OrOp), DstReg)
.addReg(PrevMaskedReg)
.addReg(CurMaskedReg ? CurMaskedReg : (unsigned)AMDGPU::EXEC);
.addReg(CurMaskedReg ? CurMaskedReg : ExecReg);
}
}


@ -56,13 +56,16 @@ char SIOptimizeExecMasking::ID = 0;
char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID;
/// If \p MI is a copy from exec, return the register copied to.
static unsigned isCopyFromExec(const MachineInstr &MI) {
static unsigned isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) {
switch (MI.getOpcode()) {
case AMDGPU::COPY:
case AMDGPU::S_MOV_B64:
case AMDGPU::S_MOV_B64_term: {
case AMDGPU::S_MOV_B64_term:
case AMDGPU::S_MOV_B32:
case AMDGPU::S_MOV_B32_term: {
const MachineOperand &Src = MI.getOperand(1);
if (Src.isReg() && Src.getReg() == AMDGPU::EXEC)
if (Src.isReg() &&
Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC))
return MI.getOperand(0).getReg();
}
}
@ -71,16 +74,20 @@ static unsigned isCopyFromExec(const MachineInstr &MI) {
}
/// If \p MI is a copy to exec, return the register copied from.
static unsigned isCopyToExec(const MachineInstr &MI) {
static unsigned isCopyToExec(const MachineInstr &MI, const GCNSubtarget &ST) {
switch (MI.getOpcode()) {
case AMDGPU::COPY:
case AMDGPU::S_MOV_B64: {
case AMDGPU::S_MOV_B64:
case AMDGPU::S_MOV_B32: {
const MachineOperand &Dst = MI.getOperand(0);
if (Dst.isReg() && Dst.getReg() == AMDGPU::EXEC && MI.getOperand(1).isReg())
if (Dst.isReg() &&
Dst.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC) &&
MI.getOperand(1).isReg())
return MI.getOperand(1).getReg();
break;
}
case AMDGPU::S_MOV_B64_term:
case AMDGPU::S_MOV_B32_term:
llvm_unreachable("should have been replaced");
}
@ -105,6 +112,23 @@ static unsigned isLogicalOpOnExec(const MachineInstr &MI) {
const MachineOperand &Src2 = MI.getOperand(2);
if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC)
return MI.getOperand(0).getReg();
break;
}
case AMDGPU::S_AND_B32:
case AMDGPU::S_OR_B32:
case AMDGPU::S_XOR_B32:
case AMDGPU::S_ANDN2_B32:
case AMDGPU::S_ORN2_B32:
case AMDGPU::S_NAND_B32:
case AMDGPU::S_NOR_B32:
case AMDGPU::S_XNOR_B32: {
const MachineOperand &Src1 = MI.getOperand(1);
if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO)
return MI.getOperand(0).getReg();
const MachineOperand &Src2 = MI.getOperand(2);
if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC_LO)
return MI.getOperand(0).getReg();
break;
}
}
@ -129,6 +153,22 @@ static unsigned getSaveExecOp(unsigned Opc) {
return AMDGPU::S_NOR_SAVEEXEC_B64;
case AMDGPU::S_XNOR_B64:
return AMDGPU::S_XNOR_SAVEEXEC_B64;
case AMDGPU::S_AND_B32:
return AMDGPU::S_AND_SAVEEXEC_B32;
case AMDGPU::S_OR_B32:
return AMDGPU::S_OR_SAVEEXEC_B32;
case AMDGPU::S_XOR_B32:
return AMDGPU::S_XOR_SAVEEXEC_B32;
case AMDGPU::S_ANDN2_B32:
return AMDGPU::S_ANDN2_SAVEEXEC_B32;
case AMDGPU::S_ORN2_B32:
return AMDGPU::S_ORN2_SAVEEXEC_B32;
case AMDGPU::S_NAND_B32:
return AMDGPU::S_NAND_SAVEEXEC_B32;
case AMDGPU::S_NOR_B32:
return AMDGPU::S_NOR_SAVEEXEC_B32;
case AMDGPU::S_XNOR_B32:
return AMDGPU::S_XNOR_SAVEEXEC_B32;
default:
return AMDGPU::INSTRUCTION_LIST_END;
}
@ -139,7 +179,8 @@ static unsigned getSaveExecOp(unsigned Opc) {
// these is expected per block.
static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI) {
switch (MI.getOpcode()) {
case AMDGPU::S_MOV_B64_term: {
case AMDGPU::S_MOV_B64_term:
case AMDGPU::S_MOV_B32_term: {
MI.setDesc(TII.get(AMDGPU::COPY));
return true;
}
@ -149,12 +190,30 @@ static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI) {
MI.setDesc(TII.get(AMDGPU::S_XOR_B64));
return true;
}
case AMDGPU::S_XOR_B32_term: {
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(TII.get(AMDGPU::S_XOR_B32));
return true;
}
case AMDGPU::S_OR_B32_term: {
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(TII.get(AMDGPU::S_OR_B32));
return true;
}
case AMDGPU::S_ANDN2_B64_term: {
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(TII.get(AMDGPU::S_ANDN2_B64));
return true;
}
case AMDGPU::S_ANDN2_B32_term: {
// This is only a terminator to get the correct spill code placement during
// register allocation.
MI.setDesc(TII.get(AMDGPU::S_ANDN2_B32));
return true;
}
default:
return false;
}
@ -177,6 +236,7 @@ static MachineBasicBlock::reverse_iterator fixTerminators(
static MachineBasicBlock::reverse_iterator findExecCopy(
const SIInstrInfo &TII,
const GCNSubtarget &ST,
MachineBasicBlock &MBB,
MachineBasicBlock::reverse_iterator I,
unsigned CopyToExec) {
@ -184,7 +244,7 @@ static MachineBasicBlock::reverse_iterator findExecCopy(
auto E = MBB.rend();
for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
unsigned CopyFromExec = isCopyFromExec(*I);
unsigned CopyFromExec = isCopyFromExec(*I, ST);
if (CopyFromExec != AMDGPU::NoRegister)
return I;
}
@ -211,6 +271,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const SIInstrInfo *TII = ST.getInstrInfo();
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
// Optimize sequences emitted for control flow lowering. They are originally
// emitted as the separate operations because spill code may need to be
@ -229,13 +290,13 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
if (I == E)
continue;
unsigned CopyToExec = isCopyToExec(*I);
unsigned CopyToExec = isCopyToExec(*I, ST);
if (CopyToExec == AMDGPU::NoRegister)
continue;
// Scan backwards to find the def.
auto CopyToExecInst = &*I;
auto CopyFromExecInst = findExecCopy(*TII, MBB, I, CopyToExec);
auto CopyFromExecInst = findExecCopy(*TII, ST, MBB, I, CopyToExec);
if (CopyFromExecInst == E) {
auto PrepareExecInst = std::next(I);
if (PrepareExecInst == E)
@ -245,7 +306,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
LLVM_DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);
PrepareExecInst->getOperand(0).setReg(AMDGPU::EXEC);
PrepareExecInst->getOperand(0).setReg(Exec);
LLVM_DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');
@ -268,7 +329,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock::iterator J
= std::next(CopyFromExecInst->getIterator()), JE = I->getIterator();
J != JE; ++J) {
if (SaveExecInst && J->readsRegister(AMDGPU::EXEC, TRI)) {
if (SaveExecInst && J->readsRegister(Exec, TRI)) {
LLVM_DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
// Make sure this is inserted after any VALU ops that may have been
// scheduled in between.
@ -352,7 +413,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
CopyToExecInst->eraseFromParent();
for (MachineInstr *OtherInst : OtherUseInsts) {
OtherInst->substituteRegister(CopyToExec, AMDGPU::EXEC,
OtherInst->substituteRegister(CopyToExec, Exec,
AMDGPU::NoSubRegister, *TRI);
}
}


@ -82,13 +82,21 @@ FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
return new SIOptimizeExecMaskingPreRA();
}
static bool isEndCF(const MachineInstr& MI, const SIRegisterInfo* TRI) {
static bool isEndCF(const MachineInstr &MI, const SIRegisterInfo *TRI,
const GCNSubtarget &ST) {
if (ST.isWave32()) {
return MI.getOpcode() == AMDGPU::S_OR_B32 &&
MI.modifiesRegister(AMDGPU::EXEC_LO, TRI);
}
return MI.getOpcode() == AMDGPU::S_OR_B64 &&
MI.modifiesRegister(AMDGPU::EXEC, TRI);
}
static bool isFullExecCopy(const MachineInstr& MI) {
if (MI.isCopy() && MI.getOperand(1).getReg() == AMDGPU::EXEC) {
static bool isFullExecCopy(const MachineInstr& MI, const GCNSubtarget& ST) {
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
if (MI.isCopy() && MI.getOperand(1).getReg() == Exec) {
assert(MI.isFullCopy());
return true;
}
@ -97,24 +105,27 @@ static bool isFullExecCopy(const MachineInstr& MI) {
}
static unsigned getOrNonExecReg(const MachineInstr &MI,
const SIInstrInfo &TII) {
const SIInstrInfo &TII,
const GCNSubtarget& ST) {
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
auto Op = TII.getNamedOperand(MI, AMDGPU::OpName::src1);
if (Op->isReg() && Op->getReg() != AMDGPU::EXEC)
if (Op->isReg() && Op->getReg() != Exec)
return Op->getReg();
Op = TII.getNamedOperand(MI, AMDGPU::OpName::src0);
if (Op->isReg() && Op->getReg() != AMDGPU::EXEC)
if (Op->isReg() && Op->getReg() != Exec)
return Op->getReg();
return AMDGPU::NoRegister;
}
static MachineInstr* getOrExecSource(const MachineInstr &MI,
const SIInstrInfo &TII,
const MachineRegisterInfo &MRI) {
auto SavedExec = getOrNonExecReg(MI, TII);
const MachineRegisterInfo &MRI,
const GCNSubtarget& ST) {
auto SavedExec = getOrNonExecReg(MI, TII, ST);
if (SavedExec == AMDGPU::NoRegister)
return nullptr;
auto SaveExecInst = MRI.getUniqueVRegDef(SavedExec);
if (!SaveExecInst || !isFullExecCopy(*SaveExecInst))
if (!SaveExecInst || !isFullExecCopy(*SaveExecInst, ST))
return nullptr;
return SaveExecInst;
}
@ -180,10 +191,11 @@ static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
LiveIntervals *LIS) {
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const SIInstrInfo *TII = ST.getInstrInfo();
const unsigned AndOpc = AMDGPU::S_AND_B64;
const unsigned Andn2Opc = AMDGPU::S_ANDN2_B64;
const unsigned CondReg = AMDGPU::VCC;
const unsigned ExecReg = AMDGPU::EXEC;
bool Wave32 = ST.isWave32();
const unsigned AndOpc = Wave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
const unsigned Andn2Opc = Wave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64;
const unsigned CondReg = Wave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
const unsigned ExecReg = Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
unsigned Opc = MI.getOpcode();
@ -290,6 +302,7 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
MachineRegisterInfo &MRI = MF.getRegInfo();
LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
DenseSet<unsigned> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
bool Changed = false;
for (MachineBasicBlock &MBB : MF) {
@ -368,19 +381,19 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
// Try to collapse adjacent endifs.
auto E = MBB.end();
auto Lead = skipDebugInstructionsForward(MBB.begin(), E);
if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI))
if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI, ST))
continue;
MachineBasicBlock *TmpMBB = &MBB;
auto NextLead = skipIgnoreExecInstsTrivialSucc(TmpMBB, std::next(Lead));
if (NextLead == TmpMBB->end() || !isEndCF(*NextLead, TRI) ||
!getOrExecSource(*NextLead, *TII, MRI))
if (NextLead == TmpMBB->end() || !isEndCF(*NextLead, TRI, ST) ||
!getOrExecSource(*NextLead, *TII, MRI, ST))
continue;
LLVM_DEBUG(dbgs() << "Redundant EXEC = S_OR_B64 found: " << *Lead << '\n');
auto SaveExec = getOrExecSource(*Lead, *TII, MRI);
unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII);
auto SaveExec = getOrExecSource(*Lead, *TII, MRI, ST);
unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII, ST);
for (auto &Op : Lead->operands()) {
if (Op.isReg())
RecalcRegs.insert(Op.getReg());
@ -414,7 +427,7 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
if (SafeToReplace) {
LIS->RemoveMachineInstrFromMaps(*SaveExec);
SaveExec->eraseFromParent();
MRI.replaceRegWith(SavedExec, AMDGPU::EXEC);
MRI.replaceRegWith(SavedExec, Exec);
LIS->removeInterval(SavedExec);
}
}


@ -954,7 +954,8 @@ bool SIPeepholeSDWA::isConvertibleToSDWA(MachineInstr &MI,
if (TII->isVOPC(Opc)) {
if (!ST.hasSDWASdst()) {
const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
if (SDst && SDst->getReg() != AMDGPU::VCC)
if (SDst && (SDst->getReg() != AMDGPU::VCC &&
SDst->getReg() != AMDGPU::VCC_LO))
return false;
}
@ -1019,7 +1020,7 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
SDWAInst.add(*Dst);
} else {
assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
SDWAInst.addReg(AMDGPU::VCC, RegState::Define);
SDWAInst.addReg(TRI->getVCC(), RegState::Define);
}
// Copy src0, initialize src0_modifiers. All sdwa instructions have src0 and


@ -63,7 +63,8 @@ SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST) :
SGPRPressureSets(getNumRegPressureSets()),
VGPRPressureSets(getNumRegPressureSets()),
SpillSGPRToVGPR(false),
SpillSGPRToSMEM(false) {
SpillSGPRToSMEM(false),
isWave32(ST.isWave32()) {
if (EnableSpillSGPRToSMEM && ST.hasScalarStores())
SpillSGPRToSMEM = true;
else if (EnableSpillSGPRToVGPR)
@ -184,6 +185,13 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Reserve null register - it shall never be allocated
reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);
// Disallow vcc_hi allocation in wave32. It may be allocated, but doing so
// will most likely result in bugs.
if (isWave32) {
Reserved.set(AMDGPU::VCC);
Reserved.set(AMDGPU::VCC_HI);
}
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
@ -1706,6 +1714,25 @@ SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
}
}
unsigned SIRegisterInfo::getVCC() const {
return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
}
const TargetRegisterClass *
SIRegisterInfo::getRegClass(unsigned RCID) const {
switch ((int)RCID) {
case AMDGPU::SReg_1RegClassID:
return getBoolRC();
case AMDGPU::SReg_1_XEXECRegClassID:
return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
: &AMDGPU::SReg_64_XEXECRegClass;
case -1:
return nullptr;
default:
return AMDGPURegisterInfo::getRegClass(RCID);
}
}
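Editor's sketch (not part of this patch): the new SIRegisterInfo accessors let a pass emit wave-size-agnostic code without spelling out the wave32/wave64 register pair at every site. The helper name and the choice of S_AND below are assumptions made purely for illustration.
// Hypothetical helper, shown only to illustrate getVCC()/isWave32() usage.
// It ANDs an arbitrary scalar condition register into VCC (VCC_LO in wave32).
static void buildCondAndIntoVCC(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator I,
                                const DebugLoc &DL, const GCNSubtarget &ST,
                                unsigned CondReg) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  unsigned AndOpc = ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  BuildMI(MBB, I, DL, TII->get(AndOpc), TRI->getVCC())
      .addReg(Exec)
      .addReg(CondReg);
}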
// Find reaching register definition
MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
MachineInstr &Use,


@ -33,6 +33,7 @@ private:
BitVector VGPRPressureSets;
bool SpillSGPRToVGPR;
bool SpillSGPRToSMEM;
bool isWave32;
void classifyPressureSet(unsigned PSetID, unsigned Reg,
BitVector &PressureSets) const;
@ -231,6 +232,20 @@ public:
getConstrainedRegClassForOperand(const MachineOperand &MO,
const MachineRegisterInfo &MRI) const override;
const TargetRegisterClass *getBoolRC() const {
return isWave32 ? &AMDGPU::SReg_32_XM0RegClass
: &AMDGPU::SReg_64RegClass;
}
const TargetRegisterClass *getWaveMaskRegClass() const {
return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
: &AMDGPU::SReg_64_XEXECRegClass;
}
unsigned getVCC() const;
const TargetRegisterClass *getRegClass(unsigned RCID) const;
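A minimal usage sketch (editorial, not in the patch), assuming a pass already holds MachineRegisterInfo &MRI and const SIRegisterInfo *TRI: the two new class queries replace hard-coded SReg_64 classes when creating wave-mask-sized virtual registers.
// A lane-mask/bool value gets the wave-size-dependent scalar class...
unsigned CondReg = MRI.createVirtualRegister(TRI->getBoolRC());
// ...and a saved-exec temporary uses the EXEC-excluded variant.
unsigned SaveExecReg = MRI.createVirtualRegister(TRI->getWaveMaskRegClass());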
// Find reaching register definition
MachineInstr *findReachingDef(unsigned Reg, unsigned SubReg,
MachineInstr &Use,


@ -551,6 +551,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
MachineRegisterInfo &MRI = MF.getRegInfo();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIInstrInfo *TII = ST.getInstrInfo();
unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;
std::vector<unsigned> I1Defs;
@ -726,10 +727,10 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// So, instead of forcing the instruction to write to VCC, we provide
// a hint to the register allocator to use VCC and then we will run
// this pass again after RA and shrink it if it outputs to VCC.
MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, VCCReg);
continue;
}
if (DstReg != AMDGPU::VCC)
if (DstReg != VCCReg)
continue;
}
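For context, a condensed sketch of the hinting step described in the comment above (an approximation of the surrounding loop, not the pass's exact control flow): virtual carry-out definitions are hinted toward the wave-size VCC register so a post-RA rerun of this pass can perform the VOP2 shrink.
// Assumed context: MachineInstr &MI, MachineRegisterInfo &MRI, GCNSubtarget &ST.
unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;
unsigned CarryOut = MI.getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(CarryOut))
  MRI.setRegAllocationHint(CarryOut, 0, VCCReg); // prefer vcc / vcc_lo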
@ -742,10 +743,10 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
continue;
unsigned SReg = Src2->getReg();
if (TargetRegisterInfo::isVirtualRegister(SReg)) {
MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC);
MRI.setRegAllocationHint(SReg, 0, VCCReg);
continue;
}
if (SReg != AMDGPU::VCC)
if (SReg != VCCReg)
continue;
}
@ -758,20 +759,24 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
AMDGPU::OpName::src2);
if (SDst) {
if (SDst->getReg() != AMDGPU::VCC) {
bool Next = false;
if (SDst->getReg() != VCCReg) {
if (TargetRegisterInfo::isVirtualRegister(SDst->getReg()))
MRI.setRegAllocationHint(SDst->getReg(), 0, AMDGPU::VCC);
continue;
MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
Next = true;
}
// All of the instructions with carry outs also have an SGPR input in
// src2.
if (Src2 && Src2->getReg() != AMDGPU::VCC) {
if (Src2 && Src2->getReg() != VCCReg) {
if (TargetRegisterInfo::isVirtualRegister(Src2->getReg()))
MRI.setRegAllocationHint(Src2->getReg(), 0, AMDGPU::VCC);
continue;
MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
Next = true;
}
if (Next)
continue;
}
// We can shrink this instruction


@ -148,6 +148,7 @@ private:
CallingConv::ID CallingConv;
const SIInstrInfo *TII;
const SIRegisterInfo *TRI;
const GCNSubtarget *ST;
MachineRegisterInfo *MRI;
LiveIntervals *LIS;
@ -278,7 +279,7 @@ void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
// for VCC, which can appear as the (implicit) input of a uniform branch,
// e.g. when a loop counter is stored in a VGPR.
if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
if (Reg == AMDGPU::EXEC)
if (Reg == AMDGPU::EXEC || Reg == AMDGPU::EXEC_LO)
continue;
for (MCRegUnitIterator RegUnit(Reg, TRI); RegUnit.isValid(); ++RegUnit) {
@ -620,13 +621,16 @@ void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
MachineInstr *MI;
if (SaveWQM) {
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_SAVEEXEC_B64),
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(ST->isWave32() ?
AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64),
SaveWQM)
.addReg(LiveMaskReg);
} else {
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_B64),
AMDGPU::EXEC)
.addReg(AMDGPU::EXEC)
unsigned Exec = ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(ST->isWave32() ?
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64),
Exec)
.addReg(Exec)
.addReg(LiveMaskReg);
}
@ -638,13 +642,15 @@ void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
unsigned SavedWQM) {
MachineInstr *MI;
unsigned Exec = ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
if (SavedWQM) {
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::EXEC)
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), Exec)
.addReg(SavedWQM);
} else {
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
AMDGPU::EXEC)
.addReg(AMDGPU::EXEC);
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(ST->isWave32() ?
AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64),
Exec)
.addReg(Exec);
}
LIS->InsertMachineInstrInMaps(*MI);
@ -667,7 +673,8 @@ void SIWholeQuadMode::fromWWM(MachineBasicBlock &MBB,
MachineInstr *MI;
assert(SavedOrig);
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_WWM), AMDGPU::EXEC)
MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_WWM),
ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC)
.addReg(SavedOrig);
LIS->InsertMachineInstrInMaps(*MI);
}
@ -693,6 +700,7 @@ void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
bool WQMFromExec = isEntry;
char State = (isEntry || !(BI.InNeeds & StateWQM)) ? StateExact : StateWQM;
char NonWWMState = 0;
const TargetRegisterClass *BoolRC = TRI->getBoolRC();
auto II = MBB.getFirstNonPHI(), IE = MBB.end();
if (isEntry)
@ -780,13 +788,13 @@ void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
if (Needs == StateWWM) {
NonWWMState = State;
SavedNonWWMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
SavedNonWWMReg = MRI->createVirtualRegister(BoolRC);
toWWM(MBB, Before, SavedNonWWMReg);
State = StateWWM;
} else {
if (State == StateWQM && (Needs & StateExact) && !(Needs & StateWQM)) {
if (!WQMFromExec && (OutNeeds & StateWQM))
SavedWQMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
SavedWQMReg = MRI->createVirtualRegister(BoolRC);
toExact(MBB, Before, SavedWQMReg, LiveMaskReg);
State = StateExact;
@ -865,17 +873,18 @@ bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
LowerToCopyInstrs.clear();
CallingConv = MF.getFunction().getCallingConv();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
ST = &MF.getSubtarget<GCNSubtarget>();
TII = ST.getInstrInfo();
TII = ST->getInstrInfo();
TRI = &TII->getRegisterInfo();
MRI = &MF.getRegInfo();
LIS = &getAnalysis<LiveIntervals>();
char GlobalFlags = analyzeFunction(MF);
unsigned LiveMaskReg = 0;
unsigned Exec = ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
if (!(GlobalFlags & StateWQM)) {
lowerLiveMaskQueries(AMDGPU::EXEC);
lowerLiveMaskQueries(Exec);
if (!(GlobalFlags & StateWWM))
return !LiveMaskQueries.empty();
} else {
@ -884,10 +893,10 @@ bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
MachineBasicBlock::iterator EntryMI = Entry.getFirstNonPHI();
if (GlobalFlags & StateExact || !LiveMaskQueries.empty()) {
LiveMaskReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
LiveMaskReg = MRI->createVirtualRegister(TRI->getBoolRC());
MachineInstr *MI = BuildMI(Entry, EntryMI, DebugLoc(),
TII->get(AMDGPU::COPY), LiveMaskReg)
.addReg(AMDGPU::EXEC);
.addReg(Exec);
LIS->InsertMachineInstrInMaps(*MI);
}
@ -895,9 +904,10 @@ bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
if (GlobalFlags == StateWQM) {
// For a shader that needs only WQM, we can just set it once.
BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
AMDGPU::EXEC)
.addReg(AMDGPU::EXEC);
BuildMI(Entry, EntryMI, DebugLoc(), TII->get(ST->isWave32() ?
AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64),
Exec)
.addReg(Exec);
lowerCopyInstrs();
// EntryMI may become invalid here


@ -1,5 +1,6 @@
# RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,ADDR64
# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,NO-ADDR64
# RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,ADDR64
# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,W64-NO-ADDR64
# RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,W64-NO-ADDR64
# Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions.
#
@ -7,27 +8,50 @@
# needing a waterfall. For all other instruction variants, and when we are
# on non-ADDR64 hardware, we emit a waterfall loop.
# COMMON-LABEL: name: idxen
# COMMON-LABEL: bb.0:
# COMMON-NEXT: successors: %bb.1({{.*}})
# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# COMMON-LABEL: bb.1:
# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# COMMON-LABEL bb.2:
# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
# W64-LABEL: name: idxen
# W64-LABEL: bb.0:
# W64-NEXT: successors: %bb.1({{.*}})
# W64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# W64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# W64-LABEL: bb.1:
# W64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# W64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# W64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# W64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# W64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# W64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# W64: [[CMP0:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# W64: [[CMP1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# W64: [[CMP:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# W64: [[TMPEXEC:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# W64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# W64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# W64: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# W64-LABEL: bb.2:
# W64: $exec = S_MOV_B64 [[SAVEEXEC]]
# W32-LABEL: name: idxen
# W32-LABEL: bb.0:
# W32-NEXT: successors: %bb.1({{.*}})
# W32: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# W32: [[SAVEEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
# W32-LABEL: bb.1:
# W32-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# W32: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# W32: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# W32: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# W32: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# W32: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# W32: [[CMP0:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# W32: [[CMP1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# W32: [[CMP:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[CMP0]], [[CMP1]], implicit-def $scc
# W32: [[TMPEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# W32: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# TODO: S_XOR_B32_term should be `implicit-def $scc`
# W32: $exec_lo = S_XOR_B32_term $exec_lo, [[TMPEXEC]]
# W32: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# W32-LABEL: bb.2:
# W32: $exec_lo = S_MOV_B32 [[SAVEEXEC]]
---
name: idxen
liveins:
@ -53,27 +77,50 @@ body: |
S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
...
# COMMON-LABEL: name: offen
# COMMON-LABEL: bb.0:
# COMMON-NEXT: successors: %bb.1({{.*}})
# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# COMMON-LABEL: bb.1:
# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# COMMON-LABEL bb.2:
# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
# W64-LABEL: name: offen
# W64-LABEL: bb.0:
# W64-NEXT: successors: %bb.1({{.*}})
# W64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# W64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# W64-LABEL: bb.1:
# W64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# W64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# W64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# W64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# W64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# W64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# W64: [[CMP0:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# W64: [[CMP1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# W64: [[CMP:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# W64: [[TMPEXEC:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# W64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# W64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# W64: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# W64-LABEL: bb.2:
# W64: $exec = S_MOV_B64 [[SAVEEXEC]]
# W32-LABEL: name: offen
# W32-LABEL: bb.0:
# W32-NEXT: successors: %bb.1({{.*}})
# W32: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# W32: [[SAVEEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
# W32-LABEL: bb.1:
# W32-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# W32: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# W32: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# W32: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# W32: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# W32: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# W32: [[CMP0:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# W32: [[CMP1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# W32: [[CMP:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[CMP0]], [[CMP1]], implicit-def $scc
# W32: [[TMPEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# W32: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# TODO: S_XOR_B32_term should be `implicit-def $scc`
# W32: $exec_lo = S_XOR_B32_term $exec_lo, [[TMPEXEC]]
# W32: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# W32-LABEL: bb.2:
# W32: $exec_lo = S_MOV_B32 [[SAVEEXEC]]
---
name: offen
liveins:
@ -99,27 +146,50 @@ body: |
S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
...
# COMMON-LABEL: name: bothen
# COMMON-LABEL: bb.0:
# COMMON-NEXT: successors: %bb.1({{.*}})
# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# COMMON-LABEL: bb.1:
# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# COMMON-LABEL bb.2:
# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
# W64-LABEL: name: bothen
# W64-LABEL: bb.0:
# W64-NEXT: successors: %bb.1({{.*}})
# W64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# W64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# W64-LABEL: bb.1:
# W64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# W64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# W64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# W64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# W64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# W64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# W64: [[CMP0:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# W64: [[CMP1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# W64: [[CMP:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# W64: [[TMPEXEC:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# W64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# W64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# W64: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# W64-LABEL: bb.2:
# W64: $exec = S_MOV_B64 [[SAVEEXEC]]
# W32-LABEL: name: bothen
# W32-LABEL: bb.0:
# W32-NEXT: successors: %bb.1({{.*}})
# W32: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# W32: [[SAVEEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
# W32-LABEL: bb.1:
# W32-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# W32: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# W32: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# W32: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# W32: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# W32: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# W32: [[CMP0:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# W32: [[CMP1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# W32: [[CMP:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[CMP0]], [[CMP1]], implicit-def $scc
# W32: [[TMPEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# W32: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# TODO: S_XOR_B32_term should be `implicit-def $scc`
# W32: $exec_lo = S_XOR_B32_term $exec_lo, [[TMPEXEC]]
# W32: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# W32-LABEL: bb.2:
# W32: $exec_lo = S_MOV_B32 [[SAVEEXEC]]
---
name: bothen
liveins:
@ -145,17 +215,17 @@ body: |
S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
...
# COMMON-LABEL: name: addr64
# COMMON-LABEL: bb.0:
# COMMON: %12:vreg_64 = COPY %8.sub0_sub1
# COMMON: %13:sreg_64 = S_MOV_B64 0
# COMMON: %14:sgpr_32 = S_MOV_B32 0
# COMMON: %15:sgpr_32 = S_MOV_B32 61440
# COMMON: %16:sreg_128 = REG_SEQUENCE %13, %subreg.sub0_sub1, %14, %subreg.sub2, %15, %subreg.sub3
# COMMON: %9:vgpr_32 = V_ADD_I32_e32 %12.sub0, %4.sub0, implicit-def $vcc, implicit $exec
# COMMON: %10:vgpr_32 = V_ADDC_U32_e32 %12.sub1, %4.sub1, implicit-def $vcc, implicit $vcc, implicit $exec
# COMMON: %11:vreg_64 = REG_SEQUENCE %9, %subreg.sub0, %10, %subreg.sub1
# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %11, killed %16, 0, 0, 0, 0, 0, 0, implicit $exec
# ADDR64-LABEL: name: addr64
# ADDR64-LABEL: bb.0:
# ADDR64: %12:vreg_64 = COPY %8.sub0_sub1
# ADDR64: %13:sreg_64 = S_MOV_B64 0
# ADDR64: %14:sgpr_32 = S_MOV_B32 0
# ADDR64: %15:sgpr_32 = S_MOV_B32 61440
# ADDR64: %16:sreg_128 = REG_SEQUENCE %13, %subreg.sub0_sub1, %14, %subreg.sub2, %15, %subreg.sub3
# ADDR64: %9:vgpr_32 = V_ADD_I32_e32 %12.sub0, %4.sub0, implicit-def $vcc, implicit $exec
# ADDR64: %10:vgpr_32 = V_ADDC_U32_e32 %12.sub1, %4.sub1, implicit-def $vcc, implicit $vcc, implicit $exec
# ADDR64: %11:vreg_64 = REG_SEQUENCE %9, %subreg.sub0, %10, %subreg.sub1
# ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %11, killed %16, 0, 0, 0, 0, 0, 0, implicit $exec
---
name: addr64
liveins:
@ -181,28 +251,49 @@ body: |
S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
...
# COMMON-LABEL: name: offset
# COMMON-LABEL: bb.0:
# W64-LABEL: name: offset
# W64-LABEL: bb.0:
# NO-ADDR64-NEXT: successors: %bb.1({{.*}})
# NO-ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# NO-ADDR64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# NO-ADDR64-LABEL: bb.1:
# NO-ADDR64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# NO-ADDR64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# NO-ADDR64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# NO-ADDR64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# NO-ADDR64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# NO-ADDR64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# NO-ADDR64: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# NO-ADDR64: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# NO-ADDR64: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# NO-ADDR64: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# NO-ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# NO-ADDR64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# NO-ADDR64: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# NO-ADDR64-LABEL bb.2:
# NO-ADDR64: $exec = S_MOV_B64 [[SAVEEXEC]]
# W64-NO-ADDR64: successors: %bb.1({{.*}})
# W64-NO-ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# W64-NO-ADDR64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
# W64-NO-ADDR64-LABEL: bb.1:
# W64-NO-ADDR64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# W64-NO-ADDR64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# W64-NO-ADDR64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# W64-NO-ADDR64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# W64-NO-ADDR64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# W64-NO-ADDR64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# W64-NO-ADDR64: [[CMP0:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# W64-NO-ADDR64: [[CMP1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# W64-NO-ADDR64: [[CMP:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
# W64-NO-ADDR64: [[TMPEXEC:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# W64-NO-ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# W64-NO-ADDR64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
# W64-NO-ADDR64: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# W64-NO-ADDR64-LABEL: bb.2:
# W64-NO-ADDR64: $exec = S_MOV_B64 [[SAVEEXEC]]
# W32: successors: %bb.1({{.*}})
# W32: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# W32: [[SAVEEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
# W32-LABEL: bb.1:
# W32-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
# W32: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
# W32: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
# W32: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
# W32: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
# W32: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
# W32: [[CMP0:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
# W32: [[CMP1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
# W32: [[CMP:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[CMP0]], [[CMP1]], implicit-def $scc
# W32: [[TMPEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
# W32: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec
# TODO: S_XOR_B32_term should be `implicit-def $scc`
# W32: $exec_lo = S_XOR_B32_term $exec_lo, [[TMPEXEC]]
# W32: S_CBRANCH_EXECNZ %bb.1, implicit $exec
# W32-LABEL: bb.2:
# W32: $exec_lo = S_MOV_B32 [[SAVEEXEC]]
# ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
# ADDR64: [[RSRCPTR:%[0-9]+]]:vreg_64 = COPY [[VRSRC]].sub0_sub1