Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM

Summary:
This clang-tidy check looks for unsigned integer variables whose initializer
begins with an implicit cast from llvm::Register, and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
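
For illustration, a minimal before/after sketch of the rewrite the check
performs (the example functions are hypothetical; what matters is that
MachineOperand::getReg() returns llvm::Register, so the old declaration
begins with an implicit Register-to-unsigned cast):

  #include "llvm/CodeGen/MachineOperand.h"
  using namespace llvm;

  // Before: declared unsigned, initialized through an implicit cast.
  void exampleBefore(const MachineOperand &MO) {
    unsigned Reg = MO.getReg();
    (void)Reg;
  }

  // After: the check changes the declared type to Register.
  void exampleAfter(const MachineOperand &MO) {
    Register Reg = MO.getReg();
    (void)Reg;
  }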

Partial reverts in:
X86FrameLowering.cpp - Some functions return unsigned and arguably should be MCRegister
X86FixupLEAs.cpp - Some functions return unsigned and arguably should be MCRegister
HexagonBitSimplify.cpp - Function takes BitTracker::RegisterRef which appears to be unsigned&
MachineVerifier.cpp - Ambiguous operator==() given MCRegister and const Register
PPCFastISel.cpp - No Register::operator-=()
PeepholeOptimizer.cpp - TargetInstrInfo::optimizeLoadInstr() takes an unsigned& (see the sketch after this list)
MachineTraceMetrics.cpp - MachineTraceMetrics lacks a suitable constructor
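
As a concrete illustration of the most common blocker behind these reverts,
here is a minimal standalone sketch, with a deliberately simplified Register
class and a hypothetical setReg() standing in for APIs like
TargetInstrInfo::optimizeLoadInstr(): a Register lvalue cannot bind to an
unsigned& parameter.

  #include <cassert>

  // Simplified stand-in for llvm::Register: converts implicitly to and
  // from unsigned, but is a distinct type.
  class Register {
    unsigned R = 0;
  public:
    Register() = default;
    Register(unsigned V) : R(V) {}
    operator unsigned() const { return R; }
  };

  // Out-parameter deliberately left as unsigned&.
  void setReg(unsigned &Reg) { Reg = 42; }

  int main() {
    unsigned U = 0;
    setReg(U);     // OK: binds directly
    Register R;
    // setReg(R);  // error: 'unsigned &' cannot bind to a 'Register' lvalue
    assert(U == 42);
    return 0;
  }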

Manual fixups in:
ARMFastISel.cpp - ARMEmitLoad() now takes a Register& instead of unsigned&
HexagonSplitDouble.cpp - Ternary operator was ambiguous between unsigned/Register (sketched after this list)
HexagonConstExtenders.cpp - Has a local class named Register; used llvm::Register instead of Register
PPCFastISel.cpp - PPCEmitLoad() now takes a Register& instead of unsigned&
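
The HexagonSplitDouble.cpp fixup deserves a sketch of its own (again using
the simplified Register class above; pick() is a hypothetical function):
with implicit conversions in both directions, the conditional operator
cannot choose a common type for unsigned and Register, so one arm must be
converted explicitly.

  class Register {
    unsigned R = 0;
  public:
    Register() = default;
    Register(unsigned V) : R(V) {}
    operator unsigned() const { return R; }
  };

  unsigned pick(bool Cond, unsigned U, Register R) {
    // return Cond ? U : R;        // error: ambiguous conversions in ?:
    return Cond ? U : unsigned(R); // manual fixup: convert one arm
  }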

Depends on D65919

Reviewers: arsenm, bogner, craig.topper, RKSimon

Reviewed By: arsenm

Subscribers: RKSimon, craig.topper, lenary, aemerson, wuzish, jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D65962

llvm-svn: 369041
Author: Daniel Sanders
Date:   2019-08-15 19:22:08 +00:00
Commit: 0c47611131 (parent 8e987702b1)
239 changed files with 1887 additions and 1891 deletions

@ -232,7 +232,7 @@ bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr &MI,
if (!MO.isReg() || !MO.isImplicit())
return false;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0)
return false;
@ -252,7 +252,7 @@ void AggressiveAntiDepBreaker::GetPassthruRegs(
if (!MO.isReg()) continue;
if ((MO.isDef() && MI.isRegTiedToUseOperand(i)) ||
IsImplicitDefUse(MI, MO)) {
const unsigned Reg = MO.getReg();
const Register Reg = MO.getReg();
for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
PassthruRegs.insert(*SubRegs);
@ -365,7 +365,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
HandleLastUse(Reg, Count + 1, "", "\tDead Def: ", "\n");
@ -375,7 +375,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g"
@ -418,7 +418,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
// Ignore KILLs and passthru registers for liveness...
if (MI.isKill() || (PassthruRegs.count(Reg) != 0))
@ -471,7 +471,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isUse()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g"
@ -506,7 +506,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
if (FirstReg != 0) {

@ -783,7 +783,7 @@ static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
/// emitImplicitDef - This method emits the specified machine instruction
/// that is an implicit def.
void AsmPrinter::emitImplicitDef(const MachineInstr *MI) const {
unsigned RegNo = MI->getOperand(0).getReg();
Register RegNo = MI->getOperand(0).getReg();
SmallString<128> Str;
raw_svector_ostream OS(Str);

@ -177,13 +177,13 @@ static void handleNewDebugValue(InlinedEntity Var, const MachineInstr &DV,
IndicesToErase.push_back(Index);
Entry.endEntry(NewIndex);
}
if (unsigned Reg = isDescribedByReg(DV))
if (Register Reg = isDescribedByReg(DV))
TrackedRegs[Reg] |= !Overlaps;
}
// If the new debug value is described by a register, add tracking of
// that register if it is not already tracked.
if (unsigned NewReg = isDescribedByReg(DV)) {
if (Register NewReg = isDescribedByReg(DV)) {
if (!TrackedRegs.count(NewReg))
addRegDescribedVar(RegVars, NewReg, Var);
LiveEntries[Var].insert(NewIndex);
@ -234,7 +234,7 @@ void llvm::calculateDbgEntityHistory(const MachineFunction *MF,
DbgLabelInstrMap &DbgLabels) {
const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
unsigned FrameReg = TRI->getFrameRegister(*MF);
Register FrameReg = TRI->getFrameRegister(*MF);
RegDescribedVarsMap RegVars;
DbgValueEntriesMap LiveEntries;
for (const auto &MBB : *MF) {

@ -660,9 +660,9 @@ static void collectCallSiteParameters(const MachineInstr *CallMI,
DbgValueLoc DbgLocVal(ParamValue->second, Val);
finishCallSiteParam(DbgLocVal, Reg);
} else if (ParamValue->first->isReg()) {
unsigned RegLoc = ParamValue->first->getReg();
Register RegLoc = ParamValue->first->getReg();
unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
unsigned FP = TRI->getFrameRegister(*MF);
Register FP = TRI->getFrameRegister(*MF);
bool IsSPorFP = (RegLoc == SP) || (RegLoc == FP);
if (TRI->isCalleeSavedPhysReg(RegLoc, *MF) || IsSPorFP) {
DbgValueLoc DbgLocVal(ParamValue->second,

@ -1871,7 +1871,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
for (const MachineOperand &MO : Loc->operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isUse()) {
@ -1909,7 +1909,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
return Loc;
if (!MO.isReg() || MO.isUse())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (Uses.count(Reg)) {
@ -1937,7 +1937,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
for (const MachineOperand &MO : PI->operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isUse()) {
@ -2010,7 +2010,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
}
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isDef()) {
@ -2060,7 +2060,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
for (const MachineOperand &MO : TIB->operands()) {
if (!MO.isReg() || !MO.isUse() || !MO.isKill())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (!AllDefsSet.count(Reg)) {
@ -2078,7 +2078,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
for (const MachineOperand &MO : TIB->operands()) {
if (!MO.isReg() || !MO.isDef() || MO.isDead())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg || Register::isVirtualRegister(Reg))
continue;
addRegAndItsAliases(Reg, TRI, ActiveDefsSet);

@ -109,7 +109,7 @@ bool BreakFalseDeps::pickBestRegisterForUndef(MachineInstr *MI, unsigned OpIdx,
MachineOperand &MO = MI->getOperand(OpIdx);
assert(MO.isUndef() && "Expected undef machine operand");
unsigned OriginalReg = MO.getReg();
Register OriginalReg = MO.getReg();
// Update only undef operands that have reg units that are mapped to one root.
for (MCRegUnitIterator Unit(OriginalReg, TRI); Unit.isValid(); ++Unit) {
@ -162,7 +162,7 @@ bool BreakFalseDeps::pickBestRegisterForUndef(MachineInstr *MI, unsigned OpIdx,
bool BreakFalseDeps::shouldBreakDependence(MachineInstr *MI, unsigned OpIdx,
unsigned Pref) {
unsigned reg = MI->getOperand(OpIdx).getReg();
Register reg = MI->getOperand(OpIdx).getReg();
unsigned Clearance = RDA->getClearance(MI, reg);
LLVM_DEBUG(dbgs() << "Clearance: " << Clearance << ", want " << Pref);

@ -244,7 +244,7 @@ float VirtRegAuxInfo::weightCalcHelper(LiveInterval &li, SlotIndex *start,
// Get allocation hints from copies.
if (!mi->isCopy())
continue;
unsigned hint = copyHint(mi, li.reg, tri, mri);
Register hint = copyHint(mi, li.reg, tri, mri);
if (!hint)
continue;
// Force hweight onto the stack so that x86 doesn't add hidden precision,

@ -187,7 +187,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr &MI) {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
const TargetRegisterClass *NewRC = nullptr;
@ -272,7 +272,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr &MI, unsigned Count) {
}
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isDef()) continue;
@ -303,7 +303,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr &MI, unsigned Count) {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
if (!MO.isUse()) continue;
@ -612,7 +612,7 @@ BreakAntiDependencies(const std::vector<SUnit> &SUnits,
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
if (MO.isUse() && TRI->regsOverlap(AntiDepReg, Reg)) {
AntiDepReg = 0;

@ -75,7 +75,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isPhysicalRegister(Reg)) {
// Don't delete live physreg defs, or any reserved register defs.
if (LivePhysRegs.test(Reg) || MRI->isReserved(Reg))
@ -140,7 +140,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isPhysicalRegister(Reg)) {
// Check the subreg set, not the alias set, because a def
// of a super-register may still be partially live after
@ -159,7 +159,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isPhysicalRegister(Reg)) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
LivePhysRegs.set(*AI);

@ -154,7 +154,7 @@ static bool isCrossCopy(const MachineRegisterInfo &MRI,
const TargetRegisterClass *DstRC,
const MachineOperand &MO) {
assert(lowersToCopies(MI));
unsigned SrcReg = MO.getReg();
Register SrcReg = MO.getReg();
const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
if (DstRC == SrcRC)
return false;
@ -194,7 +194,7 @@ void DetectDeadLanes::addUsedLanesOnOperand(const MachineOperand &MO,
LaneBitmask UsedLanes) {
if (!MO.readsReg())
return;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (!Register::isVirtualRegister(MOReg))
return;
@ -250,7 +250,7 @@ LaneBitmask DetectDeadLanes::transferUsedLanes(const MachineInstr &MI,
return MO2UsedLanes;
const MachineOperand &Def = MI.getOperand(0);
unsigned DefReg = Def.getReg();
Register DefReg = Def.getReg();
const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
LaneBitmask MO1UsedLanes;
if (RC->CoveredBySubRegs)
@ -285,7 +285,7 @@ void DetectDeadLanes::transferDefinedLanesStep(const MachineOperand &Use,
if (MI.getOpcode() == TargetOpcode::PATCHPOINT)
return;
const MachineOperand &Def = *MI.defs().begin();
unsigned DefReg = Def.getReg();
Register DefReg = Def.getReg();
if (!Register::isVirtualRegister(DefReg))
return;
unsigned DefRegIdx = Register::virtReg2Index(DefReg);
@ -377,7 +377,7 @@ LaneBitmask DetectDeadLanes::determineInitialDefinedLanes(unsigned Reg) {
for (const MachineOperand &MO : DefMI.uses()) {
if (!MO.isReg() || !MO.readsReg())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (!MOReg)
continue;
@ -428,7 +428,7 @@ LaneBitmask DetectDeadLanes::determineInitialUsedLanes(unsigned Reg) {
if (lowersToCopies(UseMI)) {
assert(UseMI.getDesc().getNumDefs() == 1);
const MachineOperand &Def = *UseMI.defs().begin();
unsigned DefReg = Def.getReg();
Register DefReg = Def.getReg();
// The used lanes of COPY-like instruction operands are determined by the
// following dataflow analysis.
if (Register::isVirtualRegister(DefReg)) {
@ -470,7 +470,7 @@ bool DetectDeadLanes::isUndefInput(const MachineOperand &MO,
if (!lowersToCopies(MI))
return false;
const MachineOperand &Def = MI.getOperand(0);
unsigned DefReg = Def.getReg();
Register DefReg = Def.getReg();
if (!Register::isVirtualRegister(DefReg))
return false;
unsigned DefRegIdx = Register::virtReg2Index(DefReg);
@ -482,7 +482,7 @@ bool DetectDeadLanes::isUndefInput(const MachineOperand &MO,
if (UsedLanes.any())
return false;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (Register::isVirtualRegister(MOReg)) {
const TargetRegisterClass *DstRC = MRI->getRegClass(DefReg);
*CrossCopy = isCrossCopy(*MRI, MI, DstRC, MO);
@ -536,7 +536,7 @@ bool DetectDeadLanes::runOnce(MachineFunction &MF) {
for (MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
unsigned RegIdx = Register::virtReg2Index(Reg);

@ -232,7 +232,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
}
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// Remember clobbered regunits.
if (MO.isDef() && Register::isPhysicalRegister(Reg))
@ -288,7 +288,7 @@ bool SSAIfConv::findInsertionPoint() {
// We're ignoring regmask operands. That is conservatively correct.
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isPhysicalRegister(Reg))
continue;
// I clobbers Reg, so it isn't live before I.
@ -467,7 +467,7 @@ void SSAIfConv::replacePHIInstrs() {
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
PHIInfo &PI = PHIs[i];
LLVM_DEBUG(dbgs() << "If-converting " << *PI.PHI);
unsigned DstReg = PI.PHI->getOperand(0).getReg();
Register DstReg = PI.PHI->getOperand(0).getReg();
TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
LLVM_DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
PI.PHI->eraseFromParent();
@ -494,7 +494,7 @@ void SSAIfConv::rewritePHIOperands() {
// equal.
DstReg = PI.TReg;
} else {
unsigned PHIDst = PI.PHI->getOperand(0).getReg();
Register PHIDst = PI.PHI->getOperand(0).getReg();
DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
TII->insertSelect(*Head, FirstTerm, HeadDL,
DstReg, Cond, PI.TReg, PI.FReg);

@ -79,13 +79,13 @@ bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
(MI->getOperand(2).isReg() && MI->getOperand(2).isUse()) &&
MI->getOperand(3).isImm() && "Invalid subreg_to_reg");
unsigned DstReg = MI->getOperand(0).getReg();
unsigned InsReg = MI->getOperand(2).getReg();
Register DstReg = MI->getOperand(0).getReg();
Register InsReg = MI->getOperand(2).getReg();
assert(!MI->getOperand(2).getSubReg() && "SubIdx on physreg?");
unsigned SubIdx = MI->getOperand(3).getImm();
assert(SubIdx != 0 && "Invalid index for insert_subreg");
unsigned DstSubReg = TRI->getSubReg(DstReg, SubIdx);
Register DstSubReg = TRI->getSubReg(DstReg, SubIdx);
assert(Register::isPhysicalRegister(DstReg) &&
"Insert destination must be in a physical register");

@ -332,7 +332,7 @@ GISelInstProfileBuilder::addNodeIDFlag(unsigned Flag) const {
const GISelInstProfileBuilder &GISelInstProfileBuilder::addNodeIDMachineOperand(
const MachineOperand &MO) const {
if (MO.isReg()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!MO.isDef())
addNodeIDRegNum(Reg);
LLT Ty = MRI.getType(Reg);

@ -62,8 +62,8 @@ bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
if (MI.getOpcode() != TargetOpcode::COPY)
return false;
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(DstReg);
LLT SrcTy = MRI.getType(SrcReg);
// Simple Copy Propagation.
@ -73,8 +73,8 @@ bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
return false;
}
void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
MI.eraseFromParent();
replaceRegWith(MRI, DstReg, SrcReg);
}
@ -286,7 +286,7 @@ void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
// up the type and extend so that it uses the preferred use.
if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
unsigned UseDstReg = UseMI->getOperand(0).getReg();
Register UseDstReg = UseMI->getOperand(0).getReg();
MachineOperand &UseSrcMO = UseMI->getOperand(1);
const LLT &UseDstTy = MRI.getType(UseDstReg);
if (UseDstReg != ChosenDstReg) {
@ -883,8 +883,8 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
unsigned DstAlign = MemOp->getBaseAlignment();
unsigned SrcAlign = 0;
unsigned Dst = MI.getOperand(1).getReg();
unsigned Src = MI.getOperand(2).getReg();
Register Dst = MI.getOperand(1).getReg();
Register Src = MI.getOperand(2).getReg();
Register Len = MI.getOperand(3).getReg();
if (ID != Intrinsic::memset) {

@ -160,8 +160,8 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
--MII;
if (MI.getOpcode() != TargetOpcode::COPY)
continue;
unsigned SrcReg = MI.getOperand(1).getReg();
unsigned DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
if (Register::isVirtualRegister(SrcReg) &&
Register::isVirtualRegister(DstReg)) {
auto SrcRC = MRI.getRegClass(SrcReg);

@ -79,7 +79,7 @@ bool Localizer::shouldLocalize(const MachineInstr &MI) {
return true;
case TargetOpcode::G_GLOBAL_VALUE: {
unsigned RematCost = TTI->getGISelRematGlobalCost();
unsigned Reg = MI.getOperand(0).getReg();
Register Reg = MI.getOperand(0).getReg();
unsigned MaxUses = maxUses(RematCost);
if (MaxUses == UINT_MAX)
return true; // Remats are "free" so always localize.
@ -121,7 +121,7 @@ bool Localizer::localizeInterBlock(MachineFunction &MF,
LLVM_DEBUG(dbgs() << "Should localize: " << MI);
assert(MI.getDesc().getNumDefs() == 1 &&
"More than one definition not supported yet");
unsigned Reg = MI.getOperand(0).getReg();
Register Reg = MI.getOperand(0).getReg();
// Check if all the users of MI are local.
// We are going to invalidation the list of use operands, so we
// can't use range iterator.
@ -151,7 +151,7 @@ bool Localizer::localizeInterBlock(MachineFunction &MF,
LocalizedMI);
// Set a new register for the definition.
unsigned NewReg = MRI->createGenericVirtualRegister(MRI->getType(Reg));
Register NewReg = MRI->createGenericVirtualRegister(MRI->getType(Reg));
MRI->setRegClassOrRegBank(NewReg, MRI->getRegClassOrRegBank(Reg));
LocalizedMI->getOperand(0).setReg(NewReg);
NewVRegIt =
@ -177,7 +177,7 @@ bool Localizer::localizeIntraBlock(LocalizedSetVecT &LocalizedInstrs) {
// many users, but this case may be better served by regalloc improvements.
for (MachineInstr *MI : LocalizedInstrs) {
unsigned Reg = MI->getOperand(0).getReg();
Register Reg = MI->getOperand(0).getReg();
MachineBasicBlock &MBB = *MI->getParent();
// All of the user MIs of this reg.
SmallPtrSet<MachineInstr *, 32> Users;

@ -43,7 +43,7 @@ unsigned llvm::constrainOperandRegClass(
const RegisterBankInfo &RBI, MachineInstr &InsertPt,
const TargetRegisterClass &RegClass, const MachineOperand &RegMO,
unsigned OpIdx) {
unsigned Reg = RegMO.getReg();
Register Reg = RegMO.getReg();
// Assume physical registers are properly constrained.
assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
@ -72,7 +72,7 @@ unsigned llvm::constrainOperandRegClass(
MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
const MachineOperand &RegMO, unsigned OpIdx) {
unsigned Reg = RegMO.getReg();
Register Reg = RegMO.getReg();
// Assume physical registers are properly constrained.
assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
@ -128,7 +128,7 @@ bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
assert(MO.isReg() && "Unsupported non-reg operand");
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// Physical registers don't need to be constrained.
if (Register::isPhysicalRegister(Reg))
continue;
@ -168,7 +168,7 @@ bool llvm::isTriviallyDead(const MachineInstr &MI,
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
return false;
}
@ -288,7 +288,7 @@ llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
if (!DstTy.isValid())
return nullptr;
while (DefMI->getOpcode() == TargetOpcode::COPY) {
unsigned SrcReg = DefMI->getOperand(1).getReg();
Register SrcReg = DefMI->getOperand(1).getReg();
auto SrcTy = MRI.getType(SrcReg);
if (!SrcTy.isValid() || SrcTy != DstTy)
break;

@ -1815,7 +1815,7 @@ bool IfConverter::IfConvertDiamondCommon(
for (const MachineOperand &MO : FI.operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isDef()) {
@ -1983,7 +1983,7 @@ static bool MaySpeculate(const MachineInstr &MI,
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isDef() && !LaterRedefs.count(Reg))

@ -278,12 +278,12 @@ bool ImplicitNullChecks::canReorder(const MachineInstr *A,
if (!(MOA.isReg() && MOA.getReg()))
continue;
unsigned RegA = MOA.getReg();
Register RegA = MOA.getReg();
for (auto MOB : B->operands()) {
if (!(MOB.isReg() && MOB.getReg()))
continue;
unsigned RegB = MOB.getReg();
Register RegB = MOB.getReg();
if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
return false;
@ -517,7 +517,7 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
//
// we must ensure that there are no instructions between the 'test' and
// conditional jump that modify %rax.
const unsigned PointerReg = MBP.LHS.getReg();
const Register PointerReg = MBP.LHS.getReg();
assert(MBP.ConditionDef->getParent() == &MBB && "Should be in basic block");
@ -689,7 +689,7 @@ void ImplicitNullChecks::rewriteNullChecks(
for (const MachineOperand &MO : FaultingInstr->operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg || MBB->isLiveIn(Reg))
continue;
MBB->addLiveIn(Reg);

@ -376,7 +376,7 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
#endif
unsigned SrcReg = CopyMI.getOperand(1).getReg();
Register SrcReg = CopyMI.getOperand(1).getReg();
LiveInterval &SrcLI = LIS.getInterval(SrcReg);
VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
LiveQueryResult SrcQ = SrcLI.Query(Idx);
@ -844,7 +844,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
if (!MO->isReg())
continue;
unsigned Reg = MO->getReg();
Register Reg = MO->getReg();
if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
continue;
}

@ -898,8 +898,8 @@ void LiveDebugValues::transferRegisterCopy(MachineInstr &MI,
return false;
};
unsigned SrcReg = SrcRegOp->getReg();
unsigned DestReg = DestRegOp->getReg();
Register SrcReg = SrcRegOp->getReg();
Register DestReg = DestRegOp->getReg();
// We want to recognize instructions where destination register is callee
// saved register. If register that could be clobbered by the call is
@ -1182,7 +1182,7 @@ bool LiveDebugValues::ExtendRanges(MachineFunction &MF) {
const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
unsigned FP = TRI->getFrameRegister(MF);
Register FP = TRI->getFrameRegister(MF);
auto IsRegOtherThanSPAndFP = [&](const MachineOperand &Op) -> bool {
return Op.isReg() && Op.getReg() != SP && Op.getReg() != FP;
};

@ -607,7 +607,7 @@ bool LDVImpl::handleDebugValue(MachineInstr &MI, SlotIndex Idx) {
bool Discard = false;
if (MI.getOperand(0).isReg() &&
Register::isVirtualRegister(MI.getOperand(0).getReg())) {
const unsigned Reg = MI.getOperand(0).getReg();
const Register Reg = MI.getOperand(0).getReg();
if (!LIS->hasInterval(Reg)) {
// The DBG_VALUE is described by a virtual register that does not have a
// live interval. Discard the DBG_VALUE.
@ -768,7 +768,7 @@ void UserValue::addDefsFromCopies(
// Copies of the full value.
if (MO.getSubReg() || !MI->isCopy())
continue;
unsigned DstReg = MI->getOperand(0).getReg();
Register DstReg = MI->getOperand(0).getReg();
// Don't follow copies to physregs. These are usually setting up call
// arguments, and the argument registers are always call clobbered. We are
@ -1162,7 +1162,7 @@ void UserValue::rewriteLocations(VirtRegMap &VRM, const MachineFunction &MF,
// Only virtual registers are rewritten.
if (Loc.isReg() && Loc.getReg() &&
Register::isVirtualRegister(Loc.getReg())) {
unsigned VirtReg = Loc.getReg();
Register VirtReg = Loc.getReg();
if (VRM.isAssignedReg(VirtReg) &&
Register::isPhysicalRegister(VRM.getPhys(VirtReg))) {
// This can create a %noreg operand in rare cases when the sub-register
@ -1258,7 +1258,7 @@ findNextInsertLocation(MachineBasicBlock *MBB,
const TargetRegisterInfo &TRI) {
if (!LocMO.isReg())
return MBB->instr_end();
unsigned Reg = LocMO.getReg();
Register Reg = LocMO.getReg();
// Find the next instruction in the MBB that define the register Reg.
while (I != MBB->end() && !I->isTerminator()) {

@ -986,7 +986,7 @@ public:
MO.setIsKill(false);
}
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (Register::isVirtualRegister(Reg)) {
@ -1644,7 +1644,7 @@ void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
unsigned Reg = LI.reg;
const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
for (unsigned I = 1; I < NumComp; ++I) {
unsigned NewVReg = MRI->createVirtualRegister(RegClass);
Register NewVReg = MRI->createVirtualRegister(RegClass);
LiveInterval &NewLI = createEmptyInterval(NewVReg);
SplitLIs.push_back(&NewLI);
}

@ -46,7 +46,7 @@ void LivePhysRegs::removeDefs(const MachineInstr &MI) {
if (O->isReg()) {
if (!O->isDef() || O->isDebug())
continue;
unsigned Reg = O->getReg();
Register Reg = O->getReg();
if (!Register::isPhysicalRegister(Reg))
continue;
removeReg(Reg);
@ -60,7 +60,7 @@ void LivePhysRegs::addUses(const MachineInstr &MI) {
for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
if (!O->isReg() || !O->readsReg() || O->isDebug())
continue;
unsigned Reg = O->getReg();
Register Reg = O->getReg();
if (!Register::isPhysicalRegister(Reg))
continue;
addReg(Reg);
@ -86,7 +86,7 @@ void LivePhysRegs::stepForward(const MachineInstr &MI,
// Remove killed registers from the set.
for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
if (O->isReg() && !O->isDebug()) {
unsigned Reg = O->getReg();
Register Reg = O->getReg();
if (!Register::isPhysicalRegister(Reg))
continue;
if (O->isDef()) {
@ -292,7 +292,7 @@ void llvm::recomputeLivenessFlags(MachineBasicBlock &MBB) {
if (!MO->isReg() || !MO->isDef() || MO->isDebug())
continue;
unsigned Reg = MO->getReg();
Register Reg = MO->getReg();
if (Reg == 0)
continue;
assert(Register::isPhysicalRegister(Reg));
@ -309,7 +309,7 @@ void llvm::recomputeLivenessFlags(MachineBasicBlock &MBB) {
if (!MO->isReg() || !MO->readsReg() || MO->isDebug())
continue;
unsigned Reg = MO->getReg();
Register Reg = MO->getReg();
if (Reg == 0)
continue;
assert(Register::isPhysicalRegister(Reg));

@ -32,7 +32,7 @@ void LiveRangeEdit::Delegate::anchor() { }
LiveInterval &LiveRangeEdit::createEmptyIntervalFrom(unsigned OldReg,
bool createSubRanges) {
unsigned VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
Register VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
if (VRM)
VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg));
@ -52,7 +52,7 @@ LiveInterval &LiveRangeEdit::createEmptyIntervalFrom(unsigned OldReg,
}
unsigned LiveRangeEdit::createFrom(unsigned OldReg) {
unsigned VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
Register VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
if (VRM) {
VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg));
}
@ -308,7 +308,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
if (!MOI->isReg())
continue;
unsigned Reg = MOI->getReg();
Register Reg = MOI->getReg();
if (!Register::isVirtualRegister(Reg)) {
// Check if MI reads any unreserved physregs.
if (Reg && MOI->readsReg() && !MRI.isReserved(Reg))

@ -172,7 +172,7 @@ bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || MO.isDead() || MO.isDebug())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// Do not move the instruction if it def/uses a physical register,
// unless it is a constant physical register or a noreg.
if (!Register::isVirtualRegister(Reg)) {

@ -118,7 +118,7 @@ void LiveRegMatrix::assign(LiveInterval &VirtReg, unsigned PhysReg) {
}
void LiveRegMatrix::unassign(LiveInterval &VirtReg) {
unsigned PhysReg = VRM->getPhys(VirtReg.reg);
Register PhysReg = VRM->getPhys(VirtReg.reg);
LLVM_DEBUG(dbgs() << "unassigning " << printReg(VirtReg.reg, TRI) << " from "
<< printReg(PhysReg, TRI) << ':');
VRM->clearVirt(VirtReg.reg);

@ -47,7 +47,7 @@ void LiveRegUnits::stepBackward(const MachineInstr &MI) {
if (O->isReg()) {
if (!O->isDef() || O->isDebug())
continue;
unsigned Reg = O->getReg();
Register Reg = O->getReg();
if (!Register::isPhysicalRegister(Reg))
continue;
removeReg(Reg);
@ -59,7 +59,7 @@ void LiveRegUnits::stepBackward(const MachineInstr &MI) {
for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
if (!O->isReg() || !O->readsReg() || O->isDebug())
continue;
unsigned Reg = O->getReg();
Register Reg = O->getReg();
if (!Register::isPhysicalRegister(Reg))
continue;
addReg(Reg);
@ -70,7 +70,7 @@ void LiveRegUnits::accumulate(const MachineInstr &MI) {
// Add defs, uses and regmask clobbers to the set.
for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
if (O->isReg()) {
unsigned Reg = O->getReg();
Register Reg = O->getReg();
if (!Register::isPhysicalRegister(Reg))
continue;
if (!O->isDef() && !O->readsReg())

@ -214,7 +214,7 @@ MachineInstr *LiveVariables::FindLastPartialDef(unsigned Reg,
MachineOperand &MO = LastDef->getOperand(i);
if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0)
continue;
unsigned DefReg = MO.getReg();
Register DefReg = MO.getReg();
if (TRI->isSubRegister(Reg, DefReg)) {
for (MCSubRegIterator SubRegs(DefReg, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
@ -519,7 +519,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI,
}
if (!MO.isReg() || MO.getReg() == 0)
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (MO.isUse()) {
if (!(Register::isPhysicalRegister(MOReg) && MRI->isReserved(MOReg)))
MO.setIsKill(false);
@ -690,7 +690,7 @@ void LiveVariables::removeVirtualRegistersKilled(MachineInstr &MI) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isKill()) {
MO.setIsKill(false);
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isVirtualRegister(Reg)) {
bool removed = getVarInfo(Reg).removeKill(MI);
assert(removed && "kill not in register's VarInfo?");

@ -340,8 +340,8 @@ static bool propagateLocalCopies(MachineBasicBlock *MBB) {
if (!MI->getOperand(1).isReg())
continue;
const unsigned Dst = MI->getOperand(0).getReg();
const unsigned Src = MI->getOperand(1).getReg();
const Register Dst = MI->getOperand(0).getReg();
const Register Src = MI->getOperand(1).getReg();
if (!Register::isVirtualRegister(Dst))
continue;
@ -386,7 +386,7 @@ static std::vector<MachineInstr *> populateCandidates(MachineBasicBlock *MBB) {
bool DoesMISideEffect = false;
if (MI->getNumOperands() > 0 && MI->getOperand(0).isReg()) {
const unsigned Dst = MI->getOperand(0).getReg();
const Register Dst = MI->getOperand(0).getReg();
DoesMISideEffect |= !Register::isVirtualRegister(Dst);
for (auto UI = MRI.use_begin(Dst); UI != MRI.use_end(); ++UI) {
@ -754,7 +754,7 @@ static bool runOnBasicBlock(MachineBasicBlock *MBB,
for (unsigned i = 0; i < IdempotentInstCount && MII != MBB->end(); ++i) {
MachineInstr &MI = *MII++;
Changed = true;
unsigned vRegToRename = MI.getOperand(0).getReg();
Register vRegToRename = MI.getOperand(0).getReg();
auto Rename = NVC.createVirtualRegister(vRegToRename);
std::vector<MachineOperand *> RenameMOs;

@ -500,14 +500,14 @@ MachineBasicBlock::addLiveIn(MCRegister PhysReg, const TargetRegisterClass *RC)
if (LiveIn)
for (;I != E && I->isCopy(); ++I)
if (I->getOperand(1).getReg() == PhysReg) {
unsigned VirtReg = I->getOperand(0).getReg();
Register VirtReg = I->getOperand(0).getReg();
if (!MRI.constrainRegClass(VirtReg, RC))
llvm_unreachable("Incompatible live-in register class.");
return VirtReg;
}
// No luck, create a virtual register.
unsigned VirtReg = MRI.createVirtualRegister(RC);
Register VirtReg = MRI.createVirtualRegister(RC);
BuildMI(*this, I, DebugLoc(), TII.get(TargetOpcode::COPY), VirtReg)
.addReg(PhysReg, RegState::Kill);
if (!LiveIn)
@ -907,7 +907,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
if (!OI->isReg() || OI->getReg() == 0 ||
!OI->isUse() || !OI->isKill() || OI->isUndef())
continue;
unsigned Reg = OI->getReg();
Register Reg = OI->getReg();
if (Register::isPhysicalRegister(Reg) ||
LV->getVarInfo(Reg).removeKill(*MI)) {
KilledRegs.push_back(Reg);
@ -928,7 +928,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
if (!OI->isReg() || OI->getReg() == 0)
continue;
unsigned Reg = OI->getReg();
Register Reg = OI->getReg();
if (!is_contained(UsedRegs, Reg))
UsedRegs.push_back(Reg);
}
@ -1033,7 +1033,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
for (unsigned ni = 1, ne = I->getNumOperands(); ni != ne; ni += 2) {
if (I->getOperand(ni+1).getMBB() == NMBB) {
MachineOperand &MO = I->getOperand(ni);
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
PHISrcRegs.insert(Reg);
if (MO.isUndef())
continue;

@ -167,14 +167,14 @@ bool MachineCSE::PerformTrivialCopyPropagation(MachineInstr *MI,
for (MachineOperand &MO : MI->operands()) {
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
bool OnlyOneUse = MRI->hasOneNonDBGUse(Reg);
MachineInstr *DefMI = MRI->getVRegDef(Reg);
if (!DefMI->isCopy())
continue;
unsigned SrcReg = DefMI->getOperand(1).getReg();
Register SrcReg = DefMI->getOperand(1).getReg();
if (!Register::isVirtualRegister(SrcReg))
continue;
if (DefMI->getOperand(0).getSubReg())
@ -280,7 +280,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg() || MO.isDef())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (Register::isVirtualRegister(Reg))
@ -299,7 +299,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineOperand &MO = MOP.value();
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (Register::isVirtualRegister(Reg))
@ -376,7 +376,7 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
return false;
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (Register::isVirtualRegister(MOReg))
continue;
if (PhysRefs.count(MOReg))
@ -593,8 +593,8 @@ bool MachineCSE::ProcessBlockCSE(MachineBasicBlock *MBB) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
unsigned OldReg = MO.getReg();
unsigned NewReg = CSMI->getOperand(i).getReg();
Register OldReg = MO.getReg();
Register NewReg = CSMI->getOperand(i).getReg();
// Go through implicit defs of CSMI and MI, if a def is not dead at MI,
// we should make sure it is not dead at CSMI.
@ -822,8 +822,8 @@ bool MachineCSE::ProcessBlockPRE(MachineDominatorTree *DT,
assert(MI->getOperand(0).isDef() &&
"First operand of instr with one explicit def must be this def");
unsigned VReg = MI->getOperand(0).getReg();
unsigned NewReg = MRI->cloneVirtualRegister(VReg);
Register VReg = MI->getOperand(0).getReg();
Register NewReg = MRI->cloneVirtualRegister(VReg);
if (!isProfitableToCSE(NewReg, VReg, CMBB, MI))
continue;
MachineInstr &NewMI =

@ -119,8 +119,8 @@ public:
void trackCopy(MachineInstr *MI, const TargetRegisterInfo &TRI) {
assert(MI->isCopy() && "Tracking non-copy?");
unsigned Def = MI->getOperand(0).getReg();
unsigned Src = MI->getOperand(1).getReg();
Register Def = MI->getOperand(0).getReg();
Register Src = MI->getOperand(1).getReg();
// Remember Def is defined by the copy.
for (MCRegUnitIterator RUI(Def, &TRI); RUI.isValid(); ++RUI)
@ -163,8 +163,8 @@ public:
// Check that the available copy isn't clobbered by any regmasks between
// itself and the destination.
unsigned AvailSrc = AvailCopy->getOperand(1).getReg();
unsigned AvailDef = AvailCopy->getOperand(0).getReg();
Register AvailSrc = AvailCopy->getOperand(1).getReg();
Register AvailDef = AvailCopy->getOperand(0).getReg();
for (const MachineInstr &MI :
make_range(AvailCopy->getIterator(), DestCopy.getIterator()))
for (const MachineOperand &MO : MI.operands())
@ -262,8 +262,8 @@ void MachineCopyPropagation::ReadRegister(unsigned Reg, MachineInstr &Reader,
/// isNopCopy("ecx = COPY eax", AH, CL) == false
static bool isNopCopy(const MachineInstr &PreviousCopy, unsigned Src,
unsigned Def, const TargetRegisterInfo *TRI) {
unsigned PreviousSrc = PreviousCopy.getOperand(1).getReg();
unsigned PreviousDef = PreviousCopy.getOperand(0).getReg();
Register PreviousSrc = PreviousCopy.getOperand(1).getReg();
Register PreviousDef = PreviousCopy.getOperand(0).getReg();
if (Src == PreviousSrc) {
assert(Def == PreviousDef);
return true;
@ -300,7 +300,7 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy, unsigned Src,
// Copy was redundantly redefining either Src or Def. Remove earlier kill
// flags between Copy and PrevCopy because the value will be reused now.
assert(Copy.isCopy());
unsigned CopyDef = Copy.getOperand(0).getReg();
Register CopyDef = Copy.getOperand(0).getReg();
assert(CopyDef == Src || CopyDef == Def);
for (MachineInstr &MI :
make_range(PrevCopy->getIterator(), Copy.getIterator()))
@ -319,7 +319,7 @@ bool MachineCopyPropagation::isForwardableRegClassCopy(const MachineInstr &Copy,
const MachineInstr &UseI,
unsigned UseIdx) {
unsigned CopySrcReg = Copy.getOperand(1).getReg();
Register CopySrcReg = Copy.getOperand(1).getReg();
// If the new register meets the opcode register constraints, then allow
// forwarding.
@ -410,9 +410,9 @@ void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
if (!Copy)
continue;
unsigned CopyDstReg = Copy->getOperand(0).getReg();
Register CopyDstReg = Copy->getOperand(0).getReg();
const MachineOperand &CopySrc = Copy->getOperand(1);
unsigned CopySrcReg = CopySrc.getReg();
Register CopySrcReg = CopySrc.getReg();
// FIXME: Don't handle partial uses of wider COPYs yet.
if (MOUse.getReg() != CopyDstReg) {
@ -468,8 +468,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// Analyze copies (which don't overlap themselves).
if (MI->isCopy() && !TRI->regsOverlap(MI->getOperand(0).getReg(),
MI->getOperand(1).getReg())) {
unsigned Def = MI->getOperand(0).getReg();
unsigned Src = MI->getOperand(1).getReg();
Register Def = MI->getOperand(0).getReg();
Register Src = MI->getOperand(1).getReg();
assert(!Register::isVirtualRegister(Def) &&
!Register::isVirtualRegister(Src) &&
@ -504,7 +504,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
for (const MachineOperand &MO : MI->implicit_operands()) {
if (!MO.isReg() || !MO.readsReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
ReadRegister(Reg, *MI, RegularUse);
@ -527,7 +527,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
for (const MachineOperand &MO : MI->implicit_operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
Tracker.clobberRegister(Reg, *TRI);
@ -541,7 +541,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// Clobber any earlyclobber regs first.
for (const MachineOperand &MO : MI->operands())
if (MO.isReg() && MO.isEarlyClobber()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// If we have a tied earlyclobber, that means it is also read by this
// instruction, so we need to make sure we don't remove it as dead
// later.
@ -560,7 +560,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
RegMask = &MO;
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
@ -583,7 +583,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
MaybeDeadCopies.begin();
DI != MaybeDeadCopies.end();) {
MachineInstr *MaybeDead = *DI;
unsigned Reg = MaybeDead->getOperand(0).getReg();
Register Reg = MaybeDead->getOperand(0).getReg();
assert(!MRI->isReserved(Reg));
if (!RegMask->clobbersPhysReg(Reg)) {

@ -154,7 +154,7 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB,
continue;
}
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
@ -177,7 +177,7 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB,
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
MachineOperand &MO = *Defs[i];
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
@ -329,7 +329,7 @@ MachineOperandIteratorBase::analyzePhysReg(unsigned Reg,
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (!MOReg || !Register::isPhysicalRegister(MOReg))
continue;

@ -424,7 +424,7 @@ void MachineLICMBase::ProcessMI(MachineInstr *MI,
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
assert(Register::isPhysicalRegister(Reg) &&
@ -526,7 +526,7 @@ void MachineLICMBase::HoistRegionPostRA() {
for (const MachineOperand &MO : TI->operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
@ -554,7 +554,7 @@ void MachineLICMBase::HoistRegionPostRA() {
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg() || MO.isDef() || !MO.getReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (PhysRegDefs.test(Reg) ||
PhysRegClobbers.test(Reg)) {
// If it's using a non-loop-invariant register, then it's obviously
@ -852,7 +852,7 @@ MachineLICMBase::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen,
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || MO.isImplicit())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
@ -922,7 +922,7 @@ static bool isInvariantStore(const MachineInstr &MI,
// Check that all register operands are caller-preserved physical registers.
for (const MachineOperand &MO : MI.operands()) {
if (MO.isReg()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// If operand is a virtual register, check if it comes from a copy of a
// physical register.
if (Register::isVirtualRegister(Reg))
@ -955,14 +955,14 @@ static bool isCopyFeedingInvariantStore(const MachineInstr &MI,
const MachineFunction *MF = MI.getMF();
// Check that we are copying a constant physical register.
unsigned CopySrcReg = MI.getOperand(1).getReg();
Register CopySrcReg = MI.getOperand(1).getReg();
if (Register::isVirtualRegister(CopySrcReg))
return false;
if (!TRI->isCallerPreservedPhysReg(CopySrcReg, *MF))
return false;
unsigned CopyDstReg = MI.getOperand(0).getReg();
Register CopyDstReg = MI.getOperand(0).getReg();
// Check if any of the uses of the copy are invariant stores.
assert(Register::isVirtualRegister(CopyDstReg) &&
"copy dst is not a virtual reg");
@ -1010,7 +1010,7 @@ bool MachineLICMBase::IsLoopInvariantInst(MachineInstr &I) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
// Don't hoist an instruction that uses or defines a physical register.
@ -1061,7 +1061,7 @@ bool MachineLICMBase::HasLoopPHIUse(const MachineInstr *MI) const {
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
@ -1104,7 +1104,7 @@ bool MachineLICMBase::HasHighOperandLatency(MachineInstr &MI,
const MachineOperand &MO = UseMI.getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (MOReg != Reg)
continue;
@ -1132,7 +1132,7 @@ bool MachineLICMBase::IsCheapInstruction(MachineInstr &MI) const {
if (!DefMO.isReg() || !DefMO.isDef())
continue;
--NumDefs;
unsigned Reg = DefMO.getReg();
Register Reg = DefMO.getReg();
if (Register::isPhysicalRegister(Reg))
continue;
@ -1225,7 +1225,7 @@ bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.isImplicit())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isDef() && HasHighOperandLatency(MI, i, Reg)) {
@ -1304,7 +1304,7 @@ MachineInstr *MachineLICMBase::ExtractHoistableLoad(MachineInstr *MI) {
MachineFunction &MF = *MI->getMF();
const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF);
// Ok, we're unfolding. Create a temporary register and do the unfold.
unsigned Reg = MRI->createVirtualRegister(RC);
Register Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
bool Success = TII->unfoldMemoryOperand(MF, *MI, Reg,
@ -1390,8 +1390,8 @@ bool MachineLICMBase::EliminateCSE(MachineInstr *MI,
SmallVector<const TargetRegisterClass*, 2> OrigRCs;
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
unsigned Idx = Defs[i];
unsigned Reg = MI->getOperand(Idx).getReg();
unsigned DupReg = Dup->getOperand(Idx).getReg();
Register Reg = MI->getOperand(Idx).getReg();
Register DupReg = Dup->getOperand(Idx).getReg();
OrigRCs.push_back(MRI->getRegClass(DupReg));
if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
@ -1403,8 +1403,8 @@ bool MachineLICMBase::EliminateCSE(MachineInstr *MI,
}
for (unsigned Idx : Defs) {
unsigned Reg = MI->getOperand(Idx).getReg();
unsigned DupReg = Dup->getOperand(Idx).getReg();
Register Reg = MI->getOperand(Idx).getReg();
Register DupReg = Dup->getOperand(Idx).getReg();
MRI->replaceRegWith(Reg, DupReg);
MRI->clearKillFlags(DupReg);
}

@ -750,7 +750,7 @@ void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
printTargetFlags(OS, *this);
switch (getType()) {
case MachineOperand::MO_Register: {
unsigned Reg = getReg();
Register Reg = getReg();
if (isImplicit())
OS << (isDef() ? "implicit-def " : "implicit ");
else if (PrintDef && isDef())

@ -349,7 +349,7 @@ void MachinePipeliner::preprocessPhiNodes(MachineBasicBlock &B) {
// If the operand uses a subregister, replace it with a new register
// without subregisters, and generate a copy to the new register.
unsigned NewReg = MRI.createVirtualRegister(RC);
Register NewReg = MRI.createVirtualRegister(RC);
MachineBasicBlock &PredB = *PI.getOperand(i+1).getMBB();
MachineBasicBlock::iterator At = PredB.getFirstTerminator();
const DebugLoc &DL = PredB.findDebugLoc(At);
@ -730,7 +730,7 @@ void SwingSchedulerDAG::updatePhiDependences() {
MOI != MOE; ++MOI) {
if (!MOI->isReg())
continue;
unsigned Reg = MOI->getReg();
Register Reg = MOI->getReg();
if (MOI->isDef()) {
// If the register is used by a Phi, then create an anti dependence.
for (MachineRegisterInfo::use_instr_iterator
@ -809,7 +809,7 @@ void SwingSchedulerDAG::changeDependences() {
continue;
// Get the MI and SUnit for the instruction that defines the original base.
unsigned OrigBase = I.getInstr()->getOperand(BasePos).getReg();
Register OrigBase = I.getInstr()->getOperand(BasePos).getReg();
MachineInstr *DefMI = MRI.getUniqueVRegDef(OrigBase);
if (!DefMI)
continue;
@ -1514,7 +1514,7 @@ static void computeLiveOuts(MachineFunction &MF, RegPressureTracker &RPTracker,
continue;
for (const MachineOperand &MO : MI->operands())
if (MO.isReg() && MO.isUse()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isVirtualRegister(Reg))
Uses.insert(Reg);
else if (MRI.isAllocatable(Reg))
@ -1525,7 +1525,7 @@ static void computeLiveOuts(MachineFunction &MF, RegPressureTracker &RPTracker,
for (SUnit *SU : NS)
for (const MachineOperand &MO : SU->getInstr()->operands())
if (MO.isReg() && MO.isDef() && !MO.isDead()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isVirtualRegister(Reg)) {
if (!Uses.count(Reg))
LiveOutRegs.push_back(RegisterMaskPair(Reg,
@ -2311,7 +2311,7 @@ void SwingSchedulerDAG::generateExistingPhis(
for (MachineBasicBlock::iterator BBI = BB->instr_begin(),
BBE = BB->getFirstNonPHI();
BBI != BBE; ++BBI) {
unsigned Def = BBI->getOperand(0).getReg();
Register Def = BBI->getOperand(0).getReg();
unsigned InitVal = 0;
unsigned LoopVal = 0;
@ -2558,7 +2558,7 @@ void SwingSchedulerDAG::generatePhis(
int StageScheduled = Schedule.stageScheduled(getSUnit(&*BBI));
assert(StageScheduled != -1 && "Expecting scheduled instruction.");
unsigned Def = MO.getReg();
Register Def = MO.getReg();
unsigned NumPhis = Schedule.getStagesForReg(Def, CurStageNum);
// An instruction scheduled in stage 0 and is used after the loop
// requires a phi in the epilog for the last definition from either
@ -2591,7 +2591,7 @@ void SwingSchedulerDAG::generatePhis(
PhiOp2 = VRMap[PrevStage - np][Def];
const TargetRegisterClass *RC = MRI.getRegClass(Def);
unsigned NewReg = MRI.createVirtualRegister(RC);
Register NewReg = MRI.createVirtualRegister(RC);
MachineInstrBuilder NewPhi =
BuildMI(*NewBB, NewBB->getFirstNonPHI(), DebugLoc(),
@ -2656,7 +2656,7 @@ void SwingSchedulerDAG::removeDeadInstructions(MachineBasicBlock *KernelBB,
MOI != MOE; ++MOI) {
if (!MOI->isReg() || !MOI->isDef())
continue;
unsigned reg = MOI->getReg();
Register reg = MOI->getReg();
// Assume physical registers are used, unless they are marked dead.
if (Register::isPhysicalRegister(reg)) {
used = !MOI->isDead();
@ -2694,7 +2694,7 @@ void SwingSchedulerDAG::removeDeadInstructions(MachineBasicBlock *KernelBB,
BBI != BBE;) {
MachineInstr *MI = &*BBI;
++BBI;
unsigned reg = MI->getOperand(0).getReg();
Register reg = MI->getOperand(0).getReg();
if (MRI.use_begin(reg) == MRI.use_end()) {
LIS.RemoveMachineInstrFromMaps(*MI);
MI->eraseFromParent();
@ -2717,7 +2717,7 @@ void SwingSchedulerDAG::splitLifetimes(MachineBasicBlock *KernelBB,
SMSchedule &Schedule) {
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
for (auto &PHI : KernelBB->phis()) {
unsigned Def = PHI.getOperand(0).getReg();
Register Def = PHI.getOperand(0).getReg();
// Check for any Phi definition that used as an operand of another Phi
// in the same block.
for (MachineRegisterInfo::use_instr_iterator I = MRI.use_instr_begin(Def),
@ -2854,7 +2854,7 @@ bool SwingSchedulerDAG::computeDelta(MachineInstr &MI, unsigned &Delta) {
if (!BaseOp->isReg())
return false;
unsigned BaseReg = BaseOp->getReg();
Register BaseReg = BaseOp->getReg();
MachineRegisterInfo &MRI = MF.getRegInfo();
// Check if there is a Phi. If so, get the definition in the loop.
@ -2964,11 +2964,11 @@ void SwingSchedulerDAG::updateInstruction(MachineInstr *NewMI, bool LastDef,
MachineOperand &MO = NewMI->getOperand(i);
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
unsigned reg = MO.getReg();
Register reg = MO.getReg();
if (MO.isDef()) {
// Create a new virtual register for the definition.
const TargetRegisterClass *RC = MRI.getRegClass(reg);
unsigned NewReg = MRI.createVirtualRegister(RC);
Register NewReg = MRI.createVirtualRegister(RC);
MO.setReg(NewReg);
VRMap[CurStageNum][reg] = NewReg;
if (LastDef)
@ -3051,7 +3051,7 @@ void SwingSchedulerDAG::rewritePhiValues(MachineBasicBlock *NewBB,
unsigned InitVal = 0;
unsigned LoopVal = 0;
getPhiRegs(PHI, BB, InitVal, LoopVal);
unsigned PhiDef = PHI.getOperand(0).getReg();
Register PhiDef = PHI.getOperand(0).getReg();
unsigned PhiStage =
(unsigned)Schedule.stageScheduled(getSUnit(MRI.getVRegDef(PhiDef)));
@ -3147,7 +3147,7 @@ bool SwingSchedulerDAG::canUseLastOffsetValue(MachineInstr *MI,
unsigned BasePosLd, OffsetPosLd;
if (!TII->getBaseAndOffsetPosition(*MI, BasePosLd, OffsetPosLd))
return false;
unsigned BaseReg = MI->getOperand(BasePosLd).getReg();
Register BaseReg = MI->getOperand(BasePosLd).getReg();
// Look for the Phi instruction.
MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
@ -3202,7 +3202,7 @@ void SwingSchedulerDAG::applyInstrChange(MachineInstr *MI,
unsigned BasePos, OffsetPos;
if (!TII->getBaseAndOffsetPosition(*MI, BasePos, OffsetPos))
return;
unsigned BaseReg = MI->getOperand(BasePos).getReg();
Register BaseReg = MI->getOperand(BasePos).getReg();
MachineInstr *LoopDef = findDefInLoop(BaseReg);
int DefStageNum = Schedule.stageScheduled(getSUnit(LoopDef));
int DefCycleNum = Schedule.cycleScheduled(getSUnit(LoopDef));
@ -3502,7 +3502,7 @@ void SMSchedule::orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg()))
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
unsigned BasePos, OffsetPos;
if (ST.getInstrInfo()->getBaseAndOffsetPosition(*MI, BasePos, OffsetPos))
if (MI->getOperand(BasePos).getReg() == Reg)
@ -3857,7 +3857,7 @@ void SMSchedule::finalizeSchedule(SwingSchedulerDAG *SSD) {
if (!Op.isReg() || !Op.isDef())
continue;
unsigned Reg = Op.getReg();
Register Reg = Op.getReg();
unsigned MaxDiff = 0;
bool PhiIsSwapped = false;
for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(Reg),

@ -95,7 +95,7 @@ unsigned LookForIdenticalPHI(MachineBasicBlock *BB,
while (I != BB->end() && I->isPHI()) {
bool Same = true;
for (unsigned i = 1, e = I->getNumOperands(); i != e; i += 2) {
unsigned SrcReg = I->getOperand(i).getReg();
Register SrcReg = I->getOperand(i).getReg();
MachineBasicBlock *SrcBB = I->getOperand(i+1).getMBB();
if (AVals[SrcBB] != SrcReg) {
Same = false;
@ -118,7 +118,7 @@ MachineInstrBuilder InsertNewDef(unsigned Opcode,
const TargetRegisterClass *RC,
MachineRegisterInfo *MRI,
const TargetInstrInfo *TII) {
unsigned NewVR = MRI->createVirtualRegister(RC);
Register NewVR = MRI->createVirtualRegister(RC);
return BuildMI(*BB, I, DebugLoc(), TII->get(Opcode), NewVR);
}

View File

@ -933,7 +933,7 @@ void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
if (TrackLaneMasks && !MO.isUse())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
@ -1687,12 +1687,12 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
// Check for pure vreg copies.
const MachineOperand &SrcOp = Copy->getOperand(1);
unsigned SrcReg = SrcOp.getReg();
Register SrcReg = SrcOp.getReg();
if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
return;
const MachineOperand &DstOp = Copy->getOperand(0);
unsigned DstReg = DstOp.getReg();
Register DstReg = DstOp.getReg();
if (!Register::isVirtualRegister(DstReg) || DstOp.isDead())
return;

View File

@ -195,8 +195,8 @@ bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
if (!MI.isCopy())
return false;
unsigned SrcReg = MI.getOperand(1).getReg();
unsigned DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
if (!Register::isVirtualRegister(SrcReg) ||
!Register::isVirtualRegister(DstReg) || !MRI->hasOneNonDBGUse(SrcReg))
return false;
@ -414,7 +414,7 @@ bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0)
continue;
@ -613,7 +613,7 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue; // Ignore non-register operands.
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0) continue;
if (Register::isPhysicalRegister(Reg)) {
@ -815,7 +815,7 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0 || !Register::isPhysicalRegister(Reg))
continue;
if (SuccToSinkTo->isLiveIn(Reg))
@ -1029,7 +1029,7 @@ static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB,
const TargetRegisterInfo *TRI) {
for (auto U : UsedOpsInCopy) {
MachineOperand &MO = MI->getOperand(U);
unsigned SrcReg = MO.getReg();
Register SrcReg = MO.getReg();
if (!UsedRegUnits.available(SrcReg)) {
MachineBasicBlock::iterator NI = std::next(MI->getIterator());
for (MachineInstr &UI : make_range(NI, CurBB.end())) {
@ -1052,7 +1052,7 @@ static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB,
for (MCSubRegIterator S(DefReg, TRI, true); S.isValid(); ++S)
SuccBB->removeLiveIn(*S);
for (auto U : UsedOpsInCopy) {
unsigned Reg = MI->getOperand(U).getReg();
Register Reg = MI->getOperand(U).getReg();
if (!SuccBB->isLiveIn(Reg))
SuccBB->addLiveIn(Reg);
}
@ -1068,7 +1068,7 @@ static bool hasRegisterDependency(MachineInstr *MI,
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (MO.isDef()) {
@ -1181,7 +1181,7 @@ bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
for (auto &MO : MI->operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned reg = MO.getReg();
Register reg = MO.getReg();
for (auto *MI : SeenDbgInstrs.lookup(reg))
DbgValsToSink.push_back(MI);
}

View File

@ -660,7 +660,7 @@ static bool getDataDeps(const MachineInstr &UseMI,
const MachineOperand &MO = *I;
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
if (Register::isPhysicalRegister(Reg)) {
@ -687,7 +687,7 @@ static void getPHIDeps(const MachineInstr &UseMI,
assert(UseMI.isPHI() && UseMI.getNumOperands() % 2 && "Bad PHI");
for (unsigned i = 1; i != UseMI.getNumOperands(); i += 2) {
if (UseMI.getOperand(i + 1).getMBB() == Pred) {
unsigned Reg = UseMI.getOperand(i).getReg();
Register Reg = UseMI.getOperand(i).getReg();
Deps.push_back(DataDep(MRI, Reg, i));
return;
}
@ -708,7 +708,7 @@ static void updatePhysDepsDownwards(const MachineInstr *UseMI,
const MachineOperand &MO = *MI;
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isPhysicalRegister(Reg))
continue;
// Track live defs and kills for updating RegUnits.
@ -902,7 +902,7 @@ static unsigned updatePhysDepsUpwards(const MachineInstr &MI, unsigned Height,
const MachineOperand &MO = *MOI;
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isPhysicalRegister(Reg))
continue;
if (MO.readsReg())
@ -930,7 +930,7 @@ static unsigned updatePhysDepsUpwards(const MachineInstr &MI, unsigned Height,
// Now we know the height of MI. Update any regunits read.
for (unsigned i = 0, e = ReadOps.size(); i != e; ++i) {
unsigned Reg = MI.getOperand(ReadOps[i]).getReg();
Register Reg = MI.getOperand(ReadOps[i]).getReg();
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
LiveRegUnit &LRU = RegUnits[*Units];
// Set the height to the highest reader of the unit.

View File

@ -1609,7 +1609,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
switch (MO->getType()) {
case MachineOperand::MO_Register: {
const unsigned Reg = MO->getReg();
const Register Reg = MO->getReg();
if (!Reg)
return;
if (MRI->tracksLiveness() && !MI->isDebugValue())
@ -2184,7 +2184,7 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
MODef.isEarlyClobber() || MODef.isDebug())
report("Unexpected flag on PHI operand", &MODef, 0);
unsigned DefReg = MODef.getReg();
Register DefReg = MODef.getReg();
if (!Register::isVirtualRegister(DefReg))
report("Expected first PHI operand to be a virtual register", &MODef, 0);

View File

@ -97,7 +97,7 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI,
unsigned &SingleValReg,
InstrSet &PHIsInCycle) {
assert(MI->isPHI() && "IsSingleValuePHICycle expects a PHI instruction");
unsigned DstReg = MI->getOperand(0).getReg();
Register DstReg = MI->getOperand(0).getReg();
// See if we already saw this register.
if (!PHIsInCycle.insert(MI).second)
@ -109,7 +109,7 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI,
// Scan the PHI operands.
for (unsigned i = 1; i != MI->getNumOperands(); i += 2) {
unsigned SrcReg = MI->getOperand(i).getReg();
Register SrcReg = MI->getOperand(i).getReg();
if (SrcReg == DstReg)
continue;
MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
@ -141,7 +141,7 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI,
/// other PHIs in a cycle.
bool OptimizePHIs::IsDeadPHICycle(MachineInstr *MI, InstrSet &PHIsInCycle) {
assert(MI->isPHI() && "IsDeadPHICycle expects a PHI instruction");
unsigned DstReg = MI->getOperand(0).getReg();
Register DstReg = MI->getOperand(0).getReg();
assert(Register::isVirtualRegister(DstReg) &&
"PHI destination is not a virtual register");
@ -176,7 +176,7 @@ bool OptimizePHIs::OptimizeBB(MachineBasicBlock &MBB) {
InstrSet PHIsInCycle;
if (IsSingleValuePHICycle(MI, SingleValReg, PHIsInCycle) &&
SingleValReg != 0) {
unsigned OldReg = MI->getOperand(0).getReg();
Register OldReg = MI->getOperand(0).getReg();
if (!MRI->constrainRegClass(SingleValReg, MRI->getRegClass(OldReg)))
continue;

View File

@ -168,7 +168,7 @@ bool PHIElimination::runOnMachineFunction(MachineFunction &MF) {
// Remove dead IMPLICIT_DEF instructions.
for (MachineInstr *DefMI : ImpDefs) {
unsigned DefReg = DefMI->getOperand(0).getReg();
Register DefReg = DefMI->getOperand(0).getReg();
if (MRI->use_nodbg_empty(DefReg)) {
if (LIS)
LIS->RemoveMachineInstrFromMaps(*DefMI);
@ -240,7 +240,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
MachineInstr *MPhi = MBB.remove(&*MBB.begin());
unsigned NumSrcs = (MPhi->getNumOperands() - 1) / 2;
unsigned DestReg = MPhi->getOperand(0).getReg();
Register DestReg = MPhi->getOperand(0).getReg();
assert(MPhi->getOperand(0).getSubReg() == 0 && "Can't handle sub-reg PHIs");
bool isDead = MPhi->getOperand(0).isDead();
@ -368,7 +368,7 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
// IncomingReg register in the corresponding predecessor basic block.
SmallPtrSet<MachineBasicBlock*, 8> MBBsInsertedInto;
for (int i = NumSrcs - 1; i >= 0; --i) {
unsigned SrcReg = MPhi->getOperand(i*2+1).getReg();
Register SrcReg = MPhi->getOperand(i * 2 + 1).getReg();
unsigned SrcSubReg = MPhi->getOperand(i*2+1).getSubReg();
bool SrcUndef = MPhi->getOperand(i*2+1).isUndef() ||
isImplicitlyDefined(SrcReg, *MRI);
@ -567,7 +567,7 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
for (MachineBasicBlock::iterator BBI = MBB.begin(), BBE = MBB.end();
BBI != BBE && BBI->isPHI(); ++BBI) {
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
unsigned Reg = BBI->getOperand(i).getReg();
Register Reg = BBI->getOperand(i).getReg();
MachineBasicBlock *PreMBB = BBI->getOperand(i+1).getMBB();
// Is there a critical edge from PreMBB to MBB?
if (PreMBB->succ_size() == 1)

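Several of the loops above, in OptimizePHIs and PHIElimination, walk PHI operands two at a time. As a reminder of why, here is a toy model of the machine-level PHI layout; the Operand struct is illustrative only, not the real MachineOperand.

#include <cassert>
#include <cstddef>
#include <vector>

// Toy model of a machine PHI:  %dst = PHI %src0, %bb0, %src1, %bb1, ...
// Operand 0 is the destination; source registers and predecessor blocks
// alternate after it, which is why the loops above start at i = 1 and
// step by 2.
struct Operand {
  bool IsReg;
  unsigned RegOrBB;
};

int main() {
  std::vector<Operand> Phi = {
      {true, 100},             // operand 0: destination register
      {true, 101}, {false, 7}, // incoming value from predecessor block 7
      {true, 102}, {false, 9}, // incoming value from predecessor block 9
  };

  unsigned NumSrcs = (Phi.size() - 1) / 2; // mirrors LowerPHINode() above
  unsigned Visited = 0;
  for (std::size_t I = 1; I != Phi.size(); I += 2) {
    assert(Phi[I].IsReg && !Phi[I + 1].IsReg);
    ++Visited;
  }
  assert(NumSrcs == 2 && Visited == NumSrcs);
  return 0;
}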
View File

@ -581,7 +581,7 @@ optimizeExtInstr(MachineInstr &MI, MachineBasicBlock &MBB,
MRI->constrainRegClass(DstReg, DstRC);
}
unsigned NewVR = MRI->createVirtualRegister(RC);
Register NewVR = MRI->createVirtualRegister(RC);
MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
TII->get(TargetOpcode::COPY), NewVR)
.addReg(DstReg, 0, SubIdx);
@ -761,7 +761,7 @@ insertPHI(MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
// NewRC is only correct if no subregisters are involved. findNextSource()
// should have rejected those cases already.
assert(SrcRegs[0].SubReg == 0 && "should not have subreg operand");
unsigned NewVR = MRI.createVirtualRegister(NewRC);
Register NewVR = MRI.createVirtualRegister(NewRC);
MachineBasicBlock *MBB = OrigPHI.getParent();
MachineInstrBuilder MIB = BuildMI(*MBB, &OrigPHI, OrigPHI.getDebugLoc(),
TII.get(TargetOpcode::PHI), NewVR);
@ -1229,7 +1229,7 @@ PeepholeOptimizer::rewriteSource(MachineInstr &CopyLike,
// Insert the COPY.
const TargetRegisterClass *DefRC = MRI->getRegClass(Def.Reg);
unsigned NewVReg = MRI->createVirtualRegister(DefRC);
Register NewVReg = MRI->createVirtualRegister(DefRC);
MachineInstr *NewCopy =
BuildMI(*CopyLike.getParent(), &CopyLike, CopyLike.getDebugLoc(),
@ -1315,7 +1315,7 @@ bool PeepholeOptimizer::isLoadFoldable(
if (MCID.getNumDefs() != 1)
return false;
unsigned Reg = MI.getOperand(0).getReg();
Register Reg = MI.getOperand(0).getReg();
// To reduce compilation time, we check MRI->hasOneNonDBGUser when inserting
// loads. It should be checked when processing uses of the load, since
// uses can be removed during peephole.
@ -1335,7 +1335,7 @@ bool PeepholeOptimizer::isMoveImmediate(
return false;
if (MCID.getNumDefs() != 1)
return false;
unsigned Reg = MI.getOperand(0).getReg();
Register Reg = MI.getOperand(0).getReg();
if (Register::isVirtualRegister(Reg)) {
ImmDefMIs.insert(std::make_pair(Reg, &MI));
ImmDefRegs.insert(Reg);
@ -1358,7 +1358,7 @@ bool PeepholeOptimizer::foldImmediate(MachineInstr &MI,
// Ignore dead implicit defs.
if (MO.isImplicit() && MO.isDead())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
if (ImmDefRegs.count(Reg) == 0)
@ -1392,11 +1392,11 @@ bool PeepholeOptimizer::foldRedundantCopy(MachineInstr &MI,
DenseMap<unsigned, MachineInstr *> &CopyMIs) {
assert(MI.isCopy() && "expected a COPY machine instruction");
unsigned SrcReg = MI.getOperand(1).getReg();
Register SrcReg = MI.getOperand(1).getReg();
if (!Register::isVirtualRegister(SrcReg))
return false;
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
if (!Register::isVirtualRegister(DstReg))
return false;
@ -1415,7 +1415,7 @@ bool PeepholeOptimizer::foldRedundantCopy(MachineInstr &MI,
if (SrcSubReg != PrevSrcSubReg)
return false;
unsigned PrevDstReg = PrevCopy->getOperand(0).getReg();
Register PrevDstReg = PrevCopy->getOperand(0).getReg();
// Only replace if the copy register class is the same.
//
@ -1442,8 +1442,8 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
if (DisableNAPhysCopyOpt)
return false;
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
if (isNAPhysCopy(SrcReg) && Register::isVirtualRegister(DstReg)) {
// %vreg = COPY %physreg
// Avoid using a datastructure which can track multiple live non-allocatable
@ -1465,7 +1465,7 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
return false;
}
unsigned PrevDstReg = PrevCopy->second->getOperand(0).getReg();
Register PrevDstReg = PrevCopy->second->getOperand(0).getReg();
if (PrevDstReg == SrcReg) {
// Remove the virt->phys copy: we saw the virtual register definition, and
// the non-allocatable physical register's state hasn't changed since then.
@ -1660,7 +1660,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
for (const MachineOperand &MO : MI->operands()) {
// Visit all operands: definitions can be implicit or explicit.
if (MO.isReg()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (MO.isDef() && isNAPhysCopy(Reg)) {
const auto &Def = NAPhysToVirtMIs.find(Reg);
if (Def != NAPhysToVirtMIs.end()) {

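Note that foldRedundantCopy() above keeps its DenseMap<unsigned, MachineInstr *> parameter even though its locals are now Register. A sketch of why that mix is fine, using std::unordered_map in place of DenseMap so it builds without LLVM headers, and the same simplified Register model as in the earlier sketch:

#include <cassert>
#include <unordered_map>

class Register {
  unsigned Reg;

public:
  constexpr Register(unsigned Val = 0) : Reg(Val) {}
  constexpr operator unsigned() const { return Reg; }
};

int main() {
  // Map keyed on raw register numbers, as in the unchanged
  // DenseMap<unsigned, MachineInstr *> &CopyMIs parameter above.
  std::unordered_map<unsigned, int> CopyMIs;

  Register SrcReg(0x80000001u); // a virtual-register number (top bit set)

  // The Register key converts to unsigned at each call site, so code that
  // now holds Register values keeps using unsigned-keyed containers as-is.
  CopyMIs[SrcReg] = 1;
  assert(CopyMIs.count(SrcReg) == 1 && CopyMIs[0x80000001u] == 1);
  return 0;
}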
View File

@ -73,7 +73,7 @@ bool ProcessImplicitDefs::canTurnIntoImplicitDef(MachineInstr *MI) {
void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
LLVM_DEBUG(dbgs() << "Processing " << *MI);
unsigned Reg = MI->getOperand(0).getReg();
Register Reg = MI->getOperand(0).getReg();
if (Register::isVirtualRegister(Reg)) {
// For virtual registers, mark all uses as <undef>, and convert users to
@ -100,7 +100,7 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
for (MachineOperand &MO : UserMI->operands()) {
if (!MO.isReg())
continue;
unsigned UserReg = MO.getReg();
Register UserReg = MO.getReg();
if (!Register::isPhysicalRegister(UserReg) ||
!TRI->regsOverlap(Reg, UserReg))
continue;

View File

@ -455,7 +455,7 @@ void RegAllocFast::usePhysReg(MachineOperand &MO) {
if (MO.isUndef())
return;
unsigned PhysReg = MO.getReg();
Register PhysReg = MO.getReg();
assert(Register::isPhysicalRegister(PhysReg) && "Bad usePhysReg operand");
markRegUsedInInstr(PhysReg);
@ -645,7 +645,7 @@ unsigned RegAllocFast::traceCopies(unsigned VirtReg) const {
unsigned C = 0;
for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) {
if (isCoalescable(MI)) {
unsigned Reg = MI.getOperand(1).getReg();
Register Reg = MI.getOperand(1).getReg();
Reg = traceCopyChain(Reg);
if (Reg != 0)
return Reg;
@ -750,7 +750,7 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, unsigned Hint0) {
void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
assert(MO.isUndef() && "expected undef use");
unsigned VirtReg = MO.getReg();
Register VirtReg = MO.getReg();
assert(Register::isVirtualRegister(VirtReg) && "Expected virtreg");
LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
@ -889,7 +889,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
SmallSet<unsigned, 8> ThroughRegs;
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isEarlyClobber() || (MO.isUse() && MO.isTied()) ||
@ -904,7 +904,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
LLVM_DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg || !Register::isPhysicalRegister(Reg))
continue;
markRegUsedInInstr(Reg);
@ -919,7 +919,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isUse()) {
@ -945,7 +945,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
if (!MO.isEarlyClobber())
@ -960,7 +960,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
UsedInInstr.clear();
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg || !Register::isPhysicalRegister(Reg))
continue;
LLVM_DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI)
@ -1043,7 +1043,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
continue;
}
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg) continue;
if (Register::isVirtualRegister(Reg)) {
VirtOpEnd = i+1;
@ -1094,7 +1094,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
for (unsigned I = 0; I != VirtOpEnd; ++I) {
MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isUse()) {
@ -1123,7 +1123,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
for (MachineOperand &MO : MI.uses()) {
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
@ -1138,7 +1138,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
if (hasEarlyClobbers) {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg || !Register::isPhysicalRegister(Reg))
continue;
// Look for physreg defs and tied uses.
@ -1166,7 +1166,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
const MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg || !Register::isPhysicalRegister(Reg) || !MRI->isAllocatable(Reg))
continue;
@ -1179,7 +1179,7 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
const MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// We have already dealt with phys regs in the previous scan.
if (Register::isPhysicalRegister(Reg))
@ -1214,7 +1214,7 @@ void RegAllocFast::handleDebugValue(MachineInstr &MI) {
// mostly constants and frame indices.
if (!MO.isReg())
return;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
return;

View File

@ -2919,7 +2919,7 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
SmallVector<unsigned, 2> RecoloringCandidates;
HintsInfo Info;
unsigned Reg = VirtReg.reg;
unsigned PhysReg = VRM->getPhys(Reg);
Register PhysReg = VRM->getPhys(Reg);
// Start the recoloring algorithm from the input live-interval, then
// it will propagate to the ones that are copy-related with it.
Visited.insert(Reg);
@ -2940,7 +2940,7 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
// Get the live interval mapped with this virtual register to be able
// to check for the interference with the new color.
LiveInterval &LI = LIS->getInterval(Reg);
unsigned CurrPhys = VRM->getPhys(Reg);
Register CurrPhys = VRM->getPhys(Reg);
// Check that the new color matches the register class constraints and
// that it is free for this live range.
if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||

View File

@ -802,7 +802,7 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
return { false, false };
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
unsigned NewReg = NewDstMO.getReg();
Register NewReg = NewDstMO.getReg();
if (NewReg != IntB.reg || !IntB.Query(AValNo->def).isKill())
return { false, false };
@ -1240,7 +1240,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
return false;
// Only support subregister destinations when the def is read-undef.
MachineOperand &DstOperand = CopyMI->getOperand(0);
unsigned CopyDstReg = DstOperand.getReg();
Register CopyDstReg = DstOperand.getReg();
if (DstOperand.getSubReg() && !DstOperand.isUndef())
return false;
@ -2411,7 +2411,7 @@ std::pair<const VNInfo*, unsigned> JoinVals::followCopyChain(
assert(MI && "No defining instruction");
if (!MI->isFullCopy())
return std::make_pair(VNI, TrackReg);
unsigned SrcReg = MI->getOperand(1).getReg();
Register SrcReg = MI->getOperand(1).getReg();
if (!Register::isVirtualRegister(SrcReg))
return std::make_pair(VNI, TrackReg);
@ -3189,7 +3189,7 @@ void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
MachineInstr *MI = Indexes->getInstructionFromIndex(Def);
assert(MI && "No instruction to erase");
if (MI->isCopy()) {
unsigned Reg = MI->getOperand(1).getReg();
Register Reg = MI->getOperand(1).getReg();
if (Register::isVirtualRegister(Reg) && Reg != CP.getSrcReg() &&
Reg != CP.getDstReg())
ShrinkRegs.push_back(Reg);
@ -3463,8 +3463,8 @@ static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) {
if (Copy->getOperand(1).isUndef())
return false;
unsigned SrcReg = Copy->getOperand(1).getReg();
unsigned DstReg = Copy->getOperand(0).getReg();
Register SrcReg = Copy->getOperand(1).getReg();
Register DstReg = Copy->getOperand(0).getReg();
if (Register::isPhysicalRegister(SrcReg) ||
Register::isPhysicalRegister(DstReg))
return false;

View File

@ -499,7 +499,7 @@ class RegisterOperandsCollector {
void collectOperand(const MachineOperand &MO) const {
if (!MO.isReg() || !MO.getReg())
return;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (MO.isUse()) {
if (!MO.isUndef() && !MO.isInternalRead())
pushReg(Reg, RegOpers.Uses);
@ -530,7 +530,7 @@ class RegisterOperandsCollector {
void collectOperandLanes(const MachineOperand &MO) const {
if (!MO.isReg() || !MO.getReg())
return;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
unsigned SubRegIdx = MO.getSubReg();
if (MO.isUse()) {
if (!MO.isUndef() && !MO.isInternalRead())

View File

@ -133,7 +133,7 @@ void RegScavenger::determineKillsAndDefs() {
}
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isPhysicalRegister(Reg) || isReserved(Reg))
continue;
@ -204,7 +204,7 @@ void RegScavenger::forward() {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isPhysicalRegister(Reg) || isReserved(Reg))
continue;
if (MO.isUse()) {
@ -694,7 +694,7 @@ static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
for (const MachineOperand &MO : NMI.operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// We only care about virtual registers and ignore virtual registers
// created by the target callbacks in the process (those will be handled
// in a scavenging round).
@ -716,7 +716,7 @@ static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// Only vregs, no newly created vregs (see above).
if (!Register::isVirtualRegister(Reg) ||
Register::virtReg2Index(Reg) >= InitialNumVirtRegs)

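The scavenger hunks above lean on Register::isVirtualRegister() and Register::virtReg2Index(). A sketch of the number space behind those predicates, following the encoding documented in llvm/CodeGen/Register.h; the real functions also carve out a stack-slot range and assert on it, which this sketch omits.

#include <cassert>

// 0 is "no register", small positive values are physical registers assigned
// by TableGen, and values with the sign bit set are virtual registers handed
// out by MachineRegisterInfo.
constexpr unsigned VirtRegFlag = 1u << 31;

static bool isVirtualRegister(unsigned Reg) { return int(Reg) < 0; }
static bool isPhysicalRegister(unsigned Reg) { return int(Reg) > 0; }
static unsigned index2VirtReg(unsigned Index) { return Index | VirtRegFlag; }
static unsigned virtReg2Index(unsigned Reg) { return Reg & ~VirtRegFlag; }

int main() {
  unsigned V5 = index2VirtReg(5); // the sixth virtual register created
  assert(isVirtualRegister(V5) && virtReg2Index(V5) == 5);
  assert(isPhysicalRegister(3));  // e.g. some TableGen'd physreg number
  assert(!isVirtualRegister(0) && !isPhysicalRegister(0)); // the sentinel

  // This is why `Register::virtReg2Index(Reg) >= InitialNumVirtRegs` above
  // can tell newly created vregs from preexisting ones: indices are handed
  // out sequentially.
  assert(virtReg2Index(index2VirtReg(7)) == 7);
  return 0;
}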
View File

@ -138,7 +138,7 @@ bool RenameIndependentSubregs::renameComponents(LiveInterval &LI) const {
LLVM_DEBUG(dbgs() << printReg(Reg) << ": Splitting into newly created:");
for (unsigned I = 1, NumClasses = Classes.getNumClasses(); I < NumClasses;
++I) {
unsigned NewVReg = MRI->createVirtualRegister(RegClass);
Register NewVReg = MRI->createVirtualRegister(RegClass);
LiveInterval &NewLI = LIS->createEmptyInterval(NewVReg);
Intervals.push_back(&NewLI);
LLVM_DEBUG(dbgs() << ' ' << printReg(NewVReg));

View File

@ -205,7 +205,7 @@ void ScheduleDAGInstrs::addSchedBarrierDeps() {
if (ExitMI) {
for (const MachineOperand &MO : ExitMI->operands()) {
if (!MO.isReg() || MO.isDef()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isPhysicalRegister(Reg)) {
Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
} else if (Register::isVirtualRegister(Reg) && MO.readsReg()) {
@ -285,7 +285,7 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
MachineInstr *MI = SU->getInstr();
MachineOperand &MO = MI->getOperand(OperIdx);
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// We do not need to track any dependencies for constant registers.
if (MRI.isConstantPhysReg(Reg))
return;
@ -361,7 +361,7 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const
{
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// No point in tracking lanemasks if we don't have interesting subregisters.
const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
if (!RC.HasDisjunctSubRegs)
@ -382,7 +382,7 @@ LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
MachineInstr *MI = SU->getInstr();
MachineOperand &MO = MI->getOperand(OperIdx);
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
LaneBitmask DefLaneMask;
LaneBitmask KillLaneMask;
@ -491,7 +491,7 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
const MachineInstr *MI = SU->getInstr();
const MachineOperand &MO = MI->getOperand(OperIdx);
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// Remember the use. Data dependencies will be added when we find the def.
LaneBitmask LaneMask = TrackLaneMasks ? getLaneMaskForMO(MO)
@ -821,7 +821,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
const MachineOperand &MO = MI.getOperand(j);
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isPhysicalRegister(Reg)) {
addPhysRegDeps(SU, j);
} else if (Register::isVirtualRegister(Reg)) {
@ -838,7 +838,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// additional use dependencies.
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Register::isPhysicalRegister(Reg)) {
addPhysRegDeps(SU, j);
} else if (Register::isVirtualRegister(Reg) && MO.readsReg()) {
@ -1071,7 +1071,7 @@ static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs,
for (MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || !MO.readsReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
@ -1102,7 +1102,7 @@ void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
if (MO.isReg()) {
if (!MO.isDef())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;
LiveRegs.removeReg(Reg);

View File

@ -272,7 +272,7 @@ unsigned InstrEmitter::getVR(SDValue Op,
// does not include operand register class info.
const TargetRegisterClass *RC = TLI->getRegClassFor(
Op.getSimpleValueType(), Op.getNode()->isDivergent());
unsigned VReg = MRI->createVirtualRegister(RC);
Register VReg = MRI->createVirtualRegister(RC);
BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
return VReg;
@ -319,7 +319,7 @@ InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
if (!ConstrainedRC) {
OpRC = TRI->getAllocatableClass(OpRC);
assert(OpRC && "Constraints cannot be fulfilled for allocation");
unsigned NewVReg = MRI->createVirtualRegister(OpRC);
Register NewVReg = MRI->createVirtualRegister(OpRC);
BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
VReg = NewVReg;
@ -386,7 +386,7 @@ void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
: nullptr;
if (OpRC && IIRC && OpRC != IIRC && Register::isVirtualRegister(VReg)) {
unsigned NewVReg = MRI->createVirtualRegister(IIRC);
Register NewVReg = MRI->createVirtualRegister(IIRC);
BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
VReg = NewVReg;
@ -464,7 +464,7 @@ unsigned InstrEmitter::ConstrainForSubReg(unsigned VReg, unsigned SubIdx,
// register instead.
RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
assert(RC && "No legal register class for VT supports that SubIdx");
unsigned NewReg = MRI->createVirtualRegister(RC);
Register NewReg = MRI->createVirtualRegister(RC);
BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
.addReg(VReg);
return NewReg;
@ -613,7 +613,7 @@ InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
const TargetRegisterClass *DstRC =
TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
Register NewVReg = MRI->createVirtualRegister(DstRC);
BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
NewVReg).addReg(VReg);
@ -630,7 +630,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
bool IsClone, bool IsCloned) {
unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
unsigned NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
unsigned NumOps = Node->getNumOperands();

View File

@ -808,7 +808,7 @@ EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
} else {
// Copy from physical register.
assert(I->getReg() && "Unknown physical register!");
unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
Register VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
(void)isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");

View File

@ -4117,7 +4117,7 @@ void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
SDValue Src = getValue(SrcV);
// Create a virtual register, then update the virtual register.
unsigned VReg =
Register VReg =
SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
// Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
// Chain can be getRoot or getControlRoot.
@ -7154,7 +7154,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
if (SwiftErrorVal && TLI.supportSwiftError()) {
// Get the last element of InVals.
SDValue Src = CLI.InVals.back();
unsigned VReg = SwiftError.getOrCreateVRegDefAt(
Register VReg = SwiftError.getOrCreateVRegDefAt(
CS.getInstruction(), FuncInfo.MBB, SwiftErrorVal);
SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
DAG.setRoot(CopyNode);
@ -9725,7 +9725,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
MachineFunction& MF = SDB->DAG.getMachineFunction();
MachineRegisterInfo& RegInfo = MF.getRegInfo();
unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
Register SRetReg =
RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
FuncInfo->DemoteRegister = SRetReg;
NewRoot =
SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);

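The SelectionDAG hunks above change the declared type of values coming back from factories like createVirtualRegister() and getOrCreateVRegDefAt(). Whichever of unsigned or Register such a callee returns at this revision, the assignment is legal, thanks to the converting constructor. A small sketch of that direction, with createVirtualRegister() as a local stand-in rather than the real MachineRegisterInfo API:

#include <cassert>

class Register {
  unsigned Reg;

public:
  constexpr Register(unsigned Val = 0) : Reg(Val) {}
  constexpr operator unsigned() const { return Reg; }
};

// Stand-in factory that still hands out raw unsigned vreg numbers.
static unsigned createVirtualRegister() {
  static unsigned NextIndex = 0;
  return (1u << 31) | NextIndex++; // virtual registers have the top bit set
}

int main() {
  // The non-explicit Register(unsigned) constructor accepts the raw number,
  // so `Register SRetReg = RegInfo.createVirtualRegister(...)` above
  // compiles no matter which side of the migration the callee is on.
  Register VReg = createVirtualRegister();
  assert((unsigned)VReg == (1u << 31));
  return 0;
}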
View File

@ -79,7 +79,7 @@ bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
const CCValAssign &ArgLoc = ArgLocs[I];
if (!ArgLoc.isRegLoc())
continue;
unsigned Reg = ArgLoc.getLocReg();
Register Reg = ArgLoc.getLocReg();
// Only look at callee saved registers.
if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
continue;

View File

@ -278,7 +278,7 @@ bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI,
// Ignore instructions like DBG_VALUE which don't read/def the register.
if (!MO.isDef() && !MO.readsReg())
continue;
unsigned PhysReg = MO.getReg();
Register PhysReg = MO.getReg();
if (!PhysReg)
continue;
assert(Register::isPhysicalRegister(PhysReg) && "Unallocated register?!");

View File

@ -437,7 +437,7 @@ void SplitEditor::addDeadDef(LiveInterval &LI, VNInfo *VNI, bool Original) {
assert(DefMI != nullptr);
LaneBitmask LM;
for (const MachineOperand &DefOp : DefMI->defs()) {
unsigned R = DefOp.getReg();
Register R = DefOp.getReg();
if (R != LI.reg)
continue;
if (unsigned SR = DefOp.getSubReg())
@ -1373,7 +1373,7 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
assert(LI.hasSubRanges());
LiveRangeCalc SubLRC;
unsigned Reg = EP.MO.getReg(), Sub = EP.MO.getSubReg();
Register Reg = EP.MO.getReg(), Sub = EP.MO.getSubReg();
LaneBitmask LM = Sub != 0 ? TRI.getSubRegIndexLaneMask(Sub)
: MRI.getMaxLaneMaskForVReg(Reg);
for (LiveInterval::SubRange &S : LI.subranges()) {

View File

@ -113,7 +113,7 @@ StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
unsigned Size = DL.getPointerSizeInBits();
assert((Size % 8) == 0 && "Need pointer size in bytes.");
Size /= 8;
unsigned Reg = (++MOI)->getReg();
Register Reg = (++MOI)->getReg();
int64_t Imm = (++MOI)->getImm();
Locs.emplace_back(StackMaps::Location::Direct, Size,
getDwarfRegNum(Reg, TRI), Imm);
@ -122,7 +122,7 @@ StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
case StackMaps::IndirectMemRefOp: {
int64_t Size = (++MOI)->getImm();
assert(Size > 0 && "Need a valid size for indirect memory locations.");
unsigned Reg = (++MOI)->getReg();
Register Reg = (++MOI)->getReg();
int64_t Imm = (++MOI)->getImm();
Locs.emplace_back(StackMaps::Location::Indirect, Size,
getDwarfRegNum(Reg, TRI), Imm);

View File

@ -235,8 +235,8 @@ bool TailDuplicator::tailDuplicateAndUpdate(
MachineInstr *Copy = Copies[i];
if (!Copy->isCopy())
continue;
unsigned Dst = Copy->getOperand(0).getReg();
unsigned Src = Copy->getOperand(1).getReg();
Register Dst = Copy->getOperand(0).getReg();
Register Src = Copy->getOperand(1).getReg();
if (MRI->hasOneNonDBGUse(Src) &&
MRI->constrainRegClass(Src, MRI->getRegClass(Dst))) {
// Copy is the only use. Do trivial copy propagation here.
@ -312,7 +312,7 @@ static void getRegsUsedByPHIs(const MachineBasicBlock &BB,
if (!MI.isPHI())
break;
for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
unsigned SrcReg = MI.getOperand(i).getReg();
Register SrcReg = MI.getOperand(i).getReg();
UsedByPhi->insert(SrcReg);
}
}
@ -340,17 +340,17 @@ void TailDuplicator::processPHI(
DenseMap<unsigned, RegSubRegPair> &LocalVRMap,
SmallVectorImpl<std::pair<unsigned, RegSubRegPair>> &Copies,
const DenseSet<unsigned> &RegsUsedByPhi, bool Remove) {
unsigned DefReg = MI->getOperand(0).getReg();
Register DefReg = MI->getOperand(0).getReg();
unsigned SrcOpIdx = getPHISrcRegOpIdx(MI, PredBB);
assert(SrcOpIdx && "Unable to find matching PHI source?");
unsigned SrcReg = MI->getOperand(SrcOpIdx).getReg();
Register SrcReg = MI->getOperand(SrcOpIdx).getReg();
unsigned SrcSubReg = MI->getOperand(SrcOpIdx).getSubReg();
const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
LocalVRMap.insert(std::make_pair(DefReg, RegSubRegPair(SrcReg, SrcSubReg)));
// Insert a copy from source to the end of the block. The def register is the
// available value liveout of the block.
unsigned NewDef = MRI->createVirtualRegister(RC);
Register NewDef = MRI->createVirtualRegister(RC);
Copies.push_back(std::make_pair(NewDef, RegSubRegPair(SrcReg, SrcSubReg)));
if (isDefLiveOut(DefReg, TailBB, MRI) || RegsUsedByPhi.count(DefReg))
addSSAUpdateEntry(DefReg, NewDef, PredBB);
@ -384,12 +384,12 @@ void TailDuplicator::duplicateInstruction(
MachineOperand &MO = NewMI.getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
if (MO.isDef()) {
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
unsigned NewReg = MRI->createVirtualRegister(RC);
Register NewReg = MRI->createVirtualRegister(RC);
MO.setReg(NewReg);
LocalVRMap.insert(std::make_pair(Reg, RegSubRegPair(NewReg, 0)));
if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg))
@ -433,7 +433,7 @@ void TailDuplicator::duplicateInstruction(
auto *NewRC = MI->getRegClassConstraint(i, TII, TRI);
if (NewRC == nullptr)
NewRC = OrigRC;
unsigned NewReg = MRI->createVirtualRegister(NewRC);
Register NewReg = MRI->createVirtualRegister(NewRC);
BuildMI(*PredBB, NewMI, NewMI.getDebugLoc(),
TII->get(TargetOpcode::COPY), NewReg)
.addReg(VI->second.Reg, 0, VI->second.SubReg);
@ -477,7 +477,7 @@ void TailDuplicator::updateSuccessorsPHIs(
assert(Idx != 0);
MachineOperand &MO0 = MI.getOperand(Idx);
unsigned Reg = MO0.getReg();
Register Reg = MO0.getReg();
if (isDead) {
// Folded into the previous BB.
// There could be duplicate phi source entries. FIXME: Should sdisel

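Note also what the rewrite deliberately leaves alone in these hunks: processPHI() keeps DenseMap<unsigned, RegSubRegPair> and pair<unsigned, RegSubRegPair> in its signature, and earlier in this diff getPhiRegs(PHI, BB, InitVal, LoopVal) keeps its unsigned locals. Out-parameters are the sticking point; a sketch, where getPhiRegs() is a stand-in with an invented signature:

// An out-parameter API frozen on unsigned, standing in for helpers like
// getPhiRegs() earlier in this diff.
static void getPhiRegs(unsigned &InitVal, unsigned &LoopVal) {
  InitVal = 1;
  LoopVal = 2;
}

class Register {
  unsigned Reg;

public:
  constexpr Register(unsigned Val = 0) : Reg(Val) {}
  constexpr operator unsigned() const { return Reg; }
};

int main() {
  // These locals must stay unsigned: a non-const unsigned& cannot bind to a
  // Register, because operator unsigned() yields a temporary rather than the
  // stored field. Migrating such a variable means changing the callee to
  // take Register& as well.
  unsigned InitVal = 0, LoopVal = 0;
  getPhiRegs(InitVal, LoopVal);

  // Register R;
  // getPhiRegs(R, LoopVal); // error: cannot bind unsigned& to Register

  return (InitVal + LoopVal == 3) ? 0 : 1;
}

That binding rule is why a purely type-of-variable rewrite sometimes has to stop at an unsigned& boundary or pull the callee's signature along with it.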
View File

@ -443,8 +443,8 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
if (FoldOp.getSubReg() || LiveOp.getSubReg())
return nullptr;
unsigned FoldReg = FoldOp.getReg();
unsigned LiveReg = LiveOp.getReg();
Register FoldReg = FoldOp.getReg();
Register LiveReg = LiveOp.getReg();
assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");
@ -805,11 +805,11 @@ void TargetInstrInfo::reassociateOps(
MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
MachineOperand &OpC = Root.getOperand(0);
unsigned RegA = OpA.getReg();
unsigned RegB = OpB.getReg();
unsigned RegX = OpX.getReg();
unsigned RegY = OpY.getReg();
unsigned RegC = OpC.getReg();
Register RegA = OpA.getReg();
Register RegB = OpB.getReg();
Register RegX = OpX.getReg();
Register RegY = OpY.getReg();
Register RegC = OpC.getReg();
if (Register::isVirtualRegister(RegA))
MRI.constrainRegClass(RegA, RC);
@ -825,7 +825,7 @@ void TargetInstrInfo::reassociateOps(
// Create a new virtual register for the result of (X op Y) instead of
// recycling RegB because the MachineCombiner's computation of the critical
// path requires a new register definition rather than an existing one.
unsigned NewVR = MRI.createVirtualRegister(RC);
Register NewVR = MRI.createVirtualRegister(RC);
InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
unsigned Opcode = Root.getOpcode();
@ -887,7 +887,7 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
// Remat clients assume operand 0 is the defined register.
if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
return false;
unsigned DefReg = MI.getOperand(0).getReg();
Register DefReg = MI.getOperand(0).getReg();
// A sub-register definition can only be rematerialized if the instruction
// doesn't read the other parts of the register. Otherwise it is really a
@ -924,7 +924,7 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == 0)
continue;

View File

@ -300,7 +300,7 @@ computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
// TODO: The following hack exists because predication passes do not
// correctly append imp-use operands, and readsReg() strangely returns false
// for predicated defs.
unsigned Reg = DefMI->getOperand(DefOperIdx).getReg();
Register Reg = DefMI->getOperand(DefOperIdx).getReg();
const MachineFunction &MF = *DefMI->getMF();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(*DepMI))

View File

@ -230,7 +230,7 @@ sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg,
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (!MOReg)
continue;
if (MO.isUse() && MOReg != SavedReg)
@ -299,7 +299,7 @@ sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg,
MachineOperand &MO = OtherMI.getOperand(i);
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (!MOReg)
continue;
if (DefReg == MOReg)
@ -682,7 +682,7 @@ bool TwoAddressInstructionPass::commuteInstruction(MachineInstr *MI,
unsigned RegBIdx,
unsigned RegCIdx,
unsigned Dist) {
unsigned RegC = MI->getOperand(RegCIdx).getReg();
Register RegC = MI->getOperand(RegCIdx).getReg();
LLVM_DEBUG(dbgs() << "2addr: COMMUTING : " << *MI);
MachineInstr *NewMI = TII->commuteInstruction(*MI, false, RegBIdx, RegCIdx);
@ -699,7 +699,7 @@ bool TwoAddressInstructionPass::commuteInstruction(MachineInstr *MI,
// Update source register map.
unsigned FromRegC = getMappedReg(RegC, SrcRegMap);
if (FromRegC) {
unsigned RegA = MI->getOperand(DstIdx).getReg();
Register RegA = MI->getOperand(DstIdx).getReg();
SrcRegMap[RegA] = FromRegC;
}
@ -910,7 +910,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (!MOReg)
continue;
if (MO.isDef())
@ -954,7 +954,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
for (const MachineOperand &MO : OtherMI.operands()) {
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (!MOReg)
continue;
if (MO.isDef()) {
@ -1092,7 +1092,7 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
for (const MachineOperand &MO : KillMI->operands()) {
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (MO.isUse()) {
if (!MOReg)
continue;
@ -1129,7 +1129,7 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
for (const MachineOperand &MO : OtherMI.operands()) {
if (!MO.isReg())
continue;
unsigned MOReg = MO.getReg();
Register MOReg = MO.getReg();
if (!MOReg)
continue;
if (MO.isUse()) {
@ -1206,8 +1206,8 @@ bool TwoAddressInstructionPass::tryInstructionCommute(MachineInstr *MI,
return false;
bool MadeChange = false;
unsigned DstOpReg = MI->getOperand(DstOpIdx).getReg();
unsigned BaseOpReg = MI->getOperand(BaseOpIdx).getReg();
Register DstOpReg = MI->getOperand(DstOpIdx).getReg();
Register BaseOpReg = MI->getOperand(BaseOpIdx).getReg();
unsigned OpsNum = MI->getDesc().getNumOperands();
unsigned OtherOpIdx = MI->getDesc().getNumDefs();
for (; OtherOpIdx < OpsNum; OtherOpIdx++) {
@ -1219,7 +1219,7 @@ bool TwoAddressInstructionPass::tryInstructionCommute(MachineInstr *MI,
!TII->findCommutedOpIndices(*MI, BaseOpIdx, OtherOpIdx))
continue;
unsigned OtherOpReg = MI->getOperand(OtherOpIdx).getReg();
Register OtherOpReg = MI->getOperand(OtherOpIdx).getReg();
bool AggressiveCommute = false;
// If OtherOp dies but BaseOp does not, swap the OtherOp and BaseOp
@ -1274,8 +1274,8 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
return false;
MachineInstr &MI = *mi;
unsigned regA = MI.getOperand(DstIdx).getReg();
unsigned regB = MI.getOperand(SrcIdx).getReg();
Register regA = MI.getOperand(DstIdx).getReg();
Register regB = MI.getOperand(SrcIdx).getReg();
assert(Register::isVirtualRegister(regB) &&
"cannot make instruction into two-address form");
@ -1361,7 +1361,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
const TargetRegisterClass *RC =
TRI->getAllocatableClass(
TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI, *MF));
unsigned Reg = MRI->createVirtualRegister(RC);
Register Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
if (!TII->unfoldMemoryOperand(*MF, MI, Reg,
/*UnfoldLoad=*/true,
@ -1471,8 +1471,8 @@ collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) {
AnyOps = true;
MachineOperand &SrcMO = MI->getOperand(SrcIdx);
MachineOperand &DstMO = MI->getOperand(DstIdx);
unsigned SrcReg = SrcMO.getReg();
unsigned DstReg = DstMO.getReg();
Register SrcReg = SrcMO.getReg();
Register DstReg = DstMO.getReg();
// Tied constraint already satisfied?
if (SrcReg == DstReg)
continue;
@ -1519,7 +1519,7 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
unsigned DstIdx = TiedPairs[tpi].second;
const MachineOperand &DstMO = MI->getOperand(DstIdx);
unsigned RegA = DstMO.getReg();
Register RegA = DstMO.getReg();
// Grab RegB from the instruction because it may have changed if the
// instruction was commuted.
@ -1739,8 +1739,8 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
if (TiedPairs.size() == 1) {
unsigned SrcIdx = TiedPairs[0].first;
unsigned DstIdx = TiedPairs[0].second;
unsigned SrcReg = mi->getOperand(SrcIdx).getReg();
unsigned DstReg = mi->getOperand(DstIdx).getReg();
Register SrcReg = mi->getOperand(SrcIdx).getReg();
Register DstReg = mi->getOperand(DstIdx).getReg();
if (SrcReg != DstReg &&
tryInstructionTransform(mi, nmi, SrcIdx, DstIdx, Dist, false)) {
// The tied operands have been eliminated or shifted further down
@ -1798,7 +1798,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
void TwoAddressInstructionPass::
eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
MachineInstr &MI = *MBBI;
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
if (MI.getOperand(0).getSubReg() || Register::isPhysicalRegister(DstReg) ||
!(MI.getNumOperands() & 1)) {
LLVM_DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << MI);
@ -1815,7 +1815,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
bool DefEmitted = false;
for (unsigned i = 1, e = MI.getNumOperands(); i < e; i += 2) {
MachineOperand &UseMO = MI.getOperand(i);
unsigned SrcReg = UseMO.getReg();
Register SrcReg = UseMO.getReg();
unsigned SubIdx = MI.getOperand(i+1).getImm();
// Nothing needs to be inserted for undef operands.
if (UseMO.isUndef())

View File

@ -173,8 +173,8 @@ bool UnreachableMachineBlockElim::runOnMachineFunction(MachineFunction &F) {
if (phi->getNumOperands() == 3) {
const MachineOperand &Input = phi->getOperand(1);
const MachineOperand &Output = phi->getOperand(0);
unsigned InputReg = Input.getReg();
unsigned OutputReg = Output.getReg();
Register InputReg = Input.getReg();
Register OutputReg = Output.getReg();
assert(Output.getSubReg() == 0 && "Cannot have output subregister");
ModifiedPHI = true;

View File

@ -677,7 +677,7 @@ AMDGPUAsmPrinter::SIFunctionResourceInfo AMDGPUAsmPrinter::analyzeResourceUsage(
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
switch (Reg) {
case AMDGPU::EXEC:
case AMDGPU::EXEC_LO:

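One more pattern worth noting from the hunk above: `switch (Reg)` keeps compiling with Reg as a Register, because a switch condition is contextually converted through a single user-defined conversion to an integral type. A sketch, with EXEC and EXEC_LO as invented stand-ins for the AMDGPU physreg enumerators:

#include <cassert>

class Register {
  unsigned Reg;

public:
  constexpr Register(unsigned Val = 0) : Reg(Val) {}
  constexpr operator unsigned() const { return Reg; }
};

// Invented stand-ins for AMDGPU::EXEC and AMDGPU::EXEC_LO.
constexpr unsigned EXEC = 1;
constexpr unsigned EXEC_LO = 2;

static bool readsExec(Register Reg) {
  switch (Reg) { // operator unsigned() supplies the integral condition
  case EXEC:
  case EXEC_LO:
    return true;
  default:
    return false;
  }
}

int main() {
  assert(readsExec(Register(EXEC)));
  assert(!readsExec(Register(0)));
  return 0;
}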
View File

@ -220,7 +220,7 @@ AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
if (MO.isReg()) {
unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
.addReg(Reg, 0, ComposedSubIdx);
@ -676,12 +676,12 @@ bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
MachineRegisterInfo &MRI = MF->getRegInfo();
const DebugLoc &DL = I.getDebugLoc();
unsigned SrcReg = I.getOperand(2).getReg();
Register SrcReg = I.getOperand(2).getReg();
unsigned Size = RBI.getSizeInBits(SrcReg, MRI, TRI);
auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
unsigned CCReg = I.getOperand(0).getReg();
Register CCReg = I.getOperand(0).getReg();
if (isSCC(CCReg, MRI)) {
int Opcode = getS_CMPOpcode(Pred, Size);
if (Opcode == -1)
@ -758,9 +758,9 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
const DebugLoc &DL = I.getDebugLoc();
int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
unsigned Reg0 = I.getOperand(3).getReg();
unsigned Reg1 = I.getOperand(4).getReg();
unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register Reg0 = I.getOperand(3).getReg();
Register Reg1 = I.getOperand(4).getReg();
Register Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));
@ -796,11 +796,11 @@ bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
MachineRegisterInfo &MRI = MF->getRegInfo();
const DebugLoc &DL = I.getDebugLoc();
unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);
assert(Size <= 32 || Size == 64);
const MachineOperand &CCOp = I.getOperand(1);
unsigned CCReg = CCOp.getReg();
Register CCReg = CCOp.getReg();
if (isSCC(CCReg, MRI)) {
unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
AMDGPU::S_CSELECT_B32;
@ -870,8 +870,8 @@ bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned DstReg = I.getOperand(0).getReg();
unsigned SrcReg = I.getOperand(1).getReg();
Register DstReg = I.getOperand(0).getReg();
Register SrcReg = I.getOperand(1).getReg();
const LLT DstTy = MRI.getType(DstReg);
const LLT SrcTy = MRI.getType(SrcReg);
if (!DstTy.isScalar())
@ -927,8 +927,8 @@ bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
MachineBasicBlock &MBB = *I.getParent();
MachineFunction &MF = *MBB.getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
const unsigned DstReg = I.getOperand(0).getReg();
const unsigned SrcReg = I.getOperand(1).getReg();
const Register DstReg = I.getOperand(0).getReg();
const Register SrcReg = I.getOperand(1).getReg();
const LLT DstTy = MRI.getType(DstReg);
const LLT SrcTy = MRI.getType(SrcReg);
@ -951,7 +951,7 @@ bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
// FIXME: Create an extra copy to avoid incorrectly constraining the result
// of the scc producer.
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
Register TmpReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), TmpReg)
.addReg(SrcReg);
BuildMI(MBB, I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
@ -1026,10 +1026,8 @@ bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
// Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
if (DstSize > 32 && SrcSize <= 32) {
// We need a 64-bit register source, but the high bits don't matter.
unsigned ExtReg
= MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned UndefReg
= MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
Register ExtReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
Register UndefReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
.addReg(SrcReg)
@ -1077,7 +1075,7 @@ bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
}
unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
unsigned Size;
bool IsSgpr;
const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
@ -1103,8 +1101,8 @@ bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
DebugLoc DL = I.getDebugLoc();
const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
&AMDGPU::VGPR_32RegClass;
unsigned LoReg = MRI.createVirtualRegister(RC);
unsigned HiReg = MRI.createVirtualRegister(RC);
Register LoReg = MRI.createVirtualRegister(RC);
Register HiReg = MRI.createVirtualRegister(RC);
const APInt &Imm = APInt(Size, I.getOperand(1).getImm());
BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
@ -1516,7 +1514,7 @@ AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
// failed trying to select this load into one of the _IMM variants since
// the _IMM Patterns are considered before the _SGPR patterns.
unsigned PtrReg = GEPInfo.SgprParts[0];
unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
.addImm(GEPInfo.Imm);
return {{

View File

@ -1023,7 +1023,7 @@ void LinearizedRegion::removeFalseRegisterKills(MachineRegisterInfo *MRI) {
for (auto &II : *MBB) {
for (auto &RI : II.uses()) {
if (RI.isReg()) {
unsigned Reg = RI.getReg();
Register Reg = RI.getReg();
if (Register::isVirtualRegister(Reg)) {
if (hasNoDef(Reg, MRI))
continue;
@ -1404,7 +1404,7 @@ void AMDGPUMachineCFGStructurizer::storePHILinearizationInfoDest(
unsigned AMDGPUMachineCFGStructurizer::storePHILinearizationInfo(
MachineInstr &PHI, SmallVector<unsigned, 2> *RegionIndices) {
unsigned DestReg = getPHIDestReg(PHI);
unsigned LinearizeDestReg =
Register LinearizeDestReg =
MRI->createVirtualRegister(MRI->getRegClass(DestReg));
PHIInfo.addDest(LinearizeDestReg, PHI.getDebugLoc());
storePHILinearizationInfoDest(LinearizeDestReg, PHI, RegionIndices);
@ -1892,7 +1892,7 @@ void AMDGPUMachineCFGStructurizer::ensureCondIsNotKilled(
if (!Cond[0].isReg())
return;
unsigned CondReg = Cond[0].getReg();
Register CondReg = Cond[0].getReg();
for (auto UI = MRI->use_begin(CondReg), E = MRI->use_end(); UI != E; ++UI) {
(*UI).setIsKill(false);
}
@ -1931,8 +1931,8 @@ void AMDGPUMachineCFGStructurizer::rewriteCodeBBTerminator(MachineBasicBlock *Co
BBSelectReg, TrueBB->getNumber());
} else {
const TargetRegisterClass *RegClass = MRI->getRegClass(BBSelectReg);
unsigned TrueBBReg = MRI->createVirtualRegister(RegClass);
unsigned FalseBBReg = MRI->createVirtualRegister(RegClass);
Register TrueBBReg = MRI->createVirtualRegister(RegClass);
Register FalseBBReg = MRI->createVirtualRegister(RegClass);
TII->materializeImmediate(*CodeBB, CodeBB->getFirstTerminator(), DL,
TrueBBReg, TrueBB->getNumber());
TII->materializeImmediate(*CodeBB, CodeBB->getFirstTerminator(), DL,
@ -1998,7 +1998,7 @@ void AMDGPUMachineCFGStructurizer::insertChainedPHI(MachineBasicBlock *IfBB,
InnerRegion->replaceRegisterOutsideRegion(SourceReg, DestReg, false, MRI);
}
const TargetRegisterClass *RegClass = MRI->getRegClass(DestReg);
unsigned NextDestReg = MRI->createVirtualRegister(RegClass);
Register NextDestReg = MRI->createVirtualRegister(RegClass);
bool IsLastDef = PHIInfo.getNumSources(DestReg) == 1;
LLVM_DEBUG(dbgs() << "Insert Chained PHI\n");
insertMergePHI(IfBB, InnerRegion->getExit(), MergeBB, DestReg, NextDestReg,
@ -2058,8 +2058,8 @@ void AMDGPUMachineCFGStructurizer::rewriteLiveOutRegs(MachineBasicBlock *IfBB,
// register, unless it is the outgoing BB select register. We have
// already created phi nodes for these.
const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
unsigned PHIDestReg = MRI->createVirtualRegister(RegClass);
unsigned IfSourceReg = MRI->createVirtualRegister(RegClass);
Register PHIDestReg = MRI->createVirtualRegister(RegClass);
Register IfSourceReg = MRI->createVirtualRegister(RegClass);
// Create initializer, this value is never used, but is needed
// to satisfy SSA.
LLVM_DEBUG(dbgs() << "Initializer for reg: " << printReg(Reg) << "\n");
@ -2174,7 +2174,7 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio
MachineBasicBlock *PHIDefMBB = PHIDefInstr->getParent();
const TargetRegisterClass *RegClass =
MRI->getRegClass(CurrentBackedgeReg);
unsigned NewBackedgeReg = MRI->createVirtualRegister(RegClass);
Register NewBackedgeReg = MRI->createVirtualRegister(RegClass);
MachineInstrBuilder BackedgePHI =
BuildMI(*PHIDefMBB, PHIDefMBB->instr_begin(), DL,
TII->get(TargetOpcode::PHI), NewBackedgeReg);
@ -2311,7 +2311,7 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
} else {
// Handle internal block.
const TargetRegisterClass *RegClass = MRI->getRegClass(BBSelectRegIn);
unsigned CodeBBSelectReg = MRI->createVirtualRegister(RegClass);
Register CodeBBSelectReg = MRI->createVirtualRegister(RegClass);
rewriteCodeBBTerminator(CodeBB, MergeBB, CodeBBSelectReg);
bool IsRegionEntryBB = CurrentRegion->getEntry() == CodeBB;
MachineBasicBlock *IfBB = createIfBlock(MergeBB, CodeBB, CodeBB, CodeBB,
@ -2448,7 +2448,7 @@ void AMDGPUMachineCFGStructurizer::splitLoopPHI(MachineInstr &PHI,
}
const TargetRegisterClass *RegClass = MRI->getRegClass(PHIDest);
unsigned NewDestReg = MRI->createVirtualRegister(RegClass);
Register NewDestReg = MRI->createVirtualRegister(RegClass);
LRegion->replaceRegisterInsideRegion(PHIDest, NewDestReg, false, MRI);
MachineInstrBuilder MIB =
BuildMI(*EntrySucc, EntrySucc->instr_begin(), PHI.getDebugLoc(),
@ -2736,9 +2736,9 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
}
const DebugLoc &DL = NewSucc->findDebugLoc(NewSucc->getFirstNonPHI());
unsigned InReg = LRegion->getBBSelectRegIn();
unsigned InnerSelectReg =
Register InnerSelectReg =
MRI->createVirtualRegister(MRI->getRegClass(InReg));
unsigned NewInReg = MRI->createVirtualRegister(MRI->getRegClass(InReg));
Register NewInReg = MRI->createVirtualRegister(MRI->getRegClass(InReg));
TII->materializeImmediate(*(LRegion->getEntry()),
LRegion->getEntry()->getFirstTerminator(), DL,
NewInReg, Region->getEntry()->getNumber());
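
The hunks above (and nearly all that follow) are a one-word type change, and the change can be this mechanical only because llvm::Register converts implicitly in both directions. A minimal stand-in with that shape (an illustrative sketch, not the real llvm/CodeGen/Register.h, which carries more helpers and assertions):

#include <cassert>

// Minimal stand-in for llvm::Register: wraps the raw unsigned id and
// converts implicitly both ways, so surrounding unsigned-based code keeps
// compiling after a variable's declared type is switched.
class Register {
  unsigned Reg = 0;

public:
  constexpr Register() = default;
  constexpr Register(unsigned R) : Reg(R) {}          // from a raw id
  constexpr operator unsigned() const { return Reg; } // back to a raw id
};

int main() {
  Register CondReg = 42u; // initializer is still an unsigned expression
  unsigned Raw = CondReg; // flows back into unsigned-typed code unchanged
  assert(Raw == 42 && CondReg != 0); // comparisons against 0 keep working
  return 0;
}
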


@ -797,7 +797,7 @@ void AMDGPURegisterBankInfo::executeInWaterfallLoop(
unsigned NumPieces = Unmerge->getNumOperands() - 1;
for (unsigned PieceIdx = 0; PieceIdx != NumPieces; ++PieceIdx) {
unsigned UnmergePiece = Unmerge.getReg(PieceIdx);
Register UnmergePiece = Unmerge.getReg(PieceIdx);
Register CurrentLaneOpReg;
if (Is64) {
@ -1548,7 +1548,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
int ResultBank = -1;
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
unsigned Reg = MI.getOperand(I).getReg();
Register Reg = MI.getOperand(I).getReg();
const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI);
// FIXME: Assuming VGPR for any undetermined inputs.
@ -2053,7 +2053,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
case Intrinsic::amdgcn_readlane: {
// This must be an SGPR, but accept a VGPR.
unsigned IdxReg = MI.getOperand(3).getReg();
Register IdxReg = MI.getOperand(3).getReg();
unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits();
unsigned IdxBank = getRegBankID(IdxReg, MRI, *TRI, AMDGPU::SGPRRegBankID);
OpdsMapping[3] = AMDGPU::getValueMapping(IdxBank, IdxSize);
@ -2068,10 +2068,10 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
case Intrinsic::amdgcn_writelane: {
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
unsigned SrcReg = MI.getOperand(2).getReg();
Register SrcReg = MI.getOperand(2).getReg();
unsigned SrcSize = MRI.getType(SrcReg).getSizeInBits();
unsigned SrcBank = getRegBankID(SrcReg, MRI, *TRI, AMDGPU::SGPRRegBankID);
unsigned IdxReg = MI.getOperand(3).getReg();
Register IdxReg = MI.getOperand(3).getReg();
unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits();
unsigned IdxBank = getRegBankID(IdxReg, MRI, *TRI, AMDGPU::SGPRRegBankID);
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize);


@ -1307,7 +1307,7 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
if (LandBlkHasOtherPred) {
report_fatal_error("Extra register needed to handle CFG");
unsigned CmpResReg =
Register CmpResReg =
HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
report_fatal_error("Extra compare instruction needed to handle CFG");
insertCondBranchBefore(LandBlk, I, R600::IF_PREDICATE_SET,
@ -1316,7 +1316,7 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
// XXX: We are running this after RA, so creating virtual registers will
// cause an assertion failure in the PostRA scheduling pass.
unsigned InitReg =
Register InitReg =
HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
insertCondBranchBefore(LandBlk, I, R600::IF_PREDICATE_SET, InitReg,
DebugLoc());


@ -726,7 +726,7 @@ int GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
if (!TRI->isVGPR(MRI, Def.getReg()))
return WaitStatesNeeded;
unsigned Reg = Def.getReg();
Register Reg = Def.getReg();
auto IsHazardFn = [this, Reg, TRI] (MachineInstr *MI) {
int DataIdx = createsVALUHazard(*MI);
return DataIdx >= 0 &&
@ -792,7 +792,7 @@ int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
return 0;
unsigned LaneSelectReg = LaneSelectOp->getReg();
Register LaneSelectReg = LaneSelectOp->getReg();
auto IsHazardFn = [TII] (MachineInstr *MI) {
return TII->isVALU(*MI);
};
@ -891,7 +891,7 @@ bool GCNHazardRecognizer::fixVcmpxPermlaneHazards(MachineInstr *MI) {
// Use V_MOV_B32 v?, v?. Register must be alive so use src0 of V_PERMLANE*
// which is always a VGPR and available.
auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
unsigned Reg = Src0->getReg();
Register Reg = Src0->getReg();
bool IsUndef = Src0->isUndef();
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(AMDGPU::V_MOV_B32_e32))
@ -976,7 +976,7 @@ bool GCNHazardRecognizer::fixSMEMtoVectorWriteHazards(MachineInstr *MI) {
if (!SDST)
return false;
const unsigned SDSTReg = SDST->getReg();
const Register SDSTReg = SDST->getReg();
auto IsHazardFn = [SDSTReg, TRI] (MachineInstr *I) {
return SIInstrInfo::isSMRD(*I) && I->readsRegister(SDSTReg, TRI);
};
@ -1251,14 +1251,14 @@ int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) {
const int MFMA16x16WritesAGPRAccVgprWriteWaitStates = 7;
const int MFMA32x32WritesAGPRAccVgprWriteWaitStates = 15;
const int MaxWaitStates = 18;
unsigned Reg = Op.getReg();
Register Reg = Op.getReg();
unsigned HazardDefLatency = 0;
auto IsOverlappedMFMAFn = [Reg, &IsMFMAFn, &HazardDefLatency, this]
(MachineInstr *MI) {
if (!IsMFMAFn(MI))
return false;
unsigned DstReg = MI->getOperand(0).getReg();
Register DstReg = MI->getOperand(0).getReg();
if (DstReg == Reg)
return false;
HazardDefLatency = std::max(HazardDefLatency,
@ -1304,7 +1304,7 @@ int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) {
auto IsAccVgprWriteFn = [Reg, this] (MachineInstr *MI) {
if (MI->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32)
return false;
unsigned DstReg = MI->getOperand(0).getReg();
Register DstReg = MI->getOperand(0).getReg();
return TRI.regsOverlap(Reg, DstReg);
};
@ -1330,14 +1330,14 @@ int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) {
const int MFMA16x16ReadSrcCAccVgprWriteWaitStates = 5;
const int MFMA32x32ReadSrcCAccVgprWriteWaitStates = 13;
const int MaxWaitStates = 13;
unsigned DstReg = MI->getOperand(0).getReg();
Register DstReg = MI->getOperand(0).getReg();
unsigned HazardDefLatency = 0;
auto IsSrcCMFMAFn = [DstReg, &IsMFMAFn, &HazardDefLatency, this]
(MachineInstr *MI) {
if (!IsMFMAFn(MI))
return false;
unsigned Reg = TII.getNamedOperand(*MI, AMDGPU::OpName::src2)->getReg();
Register Reg = TII.getNamedOperand(*MI, AMDGPU::OpName::src2)->getReg();
HazardDefLatency = std::max(HazardDefLatency,
TSchedModel.computeInstrLatency(MI));
return TRI.regsOverlap(Reg, DstReg);
@ -1376,7 +1376,7 @@ int GCNHazardRecognizer::checkMAILdStHazards(MachineInstr *MI) {
if (!Op.isReg() || !TRI.isVGPR(MF.getRegInfo(), Op.getReg()))
continue;
unsigned Reg = Op.getReg();
Register Reg = Op.getReg();
const int AccVgprReadLdStWaitStates = 2;
const int VALUWriteAccVgprReadLdStDepVALUWaitStates = 1;


@ -173,11 +173,11 @@ GCNNSAReassign::CheckNSA(const MachineInstr &MI, bool Fast) const {
bool NSA = false;
for (unsigned I = 0; I < Info->VAddrDwords; ++I) {
const MachineOperand &Op = MI.getOperand(VAddr0Idx + I);
unsigned Reg = Op.getReg();
Register Reg = Op.getReg();
if (Register::isPhysicalRegister(Reg) || !VRM->isAssignedReg(Reg))
return NSA_Status::FIXED;
unsigned PhysReg = VRM->getPhys(Reg);
Register PhysReg = VRM->getPhys(Reg);
if (!Fast) {
if (!PhysReg)
@ -276,7 +276,7 @@ bool GCNNSAReassign::runOnMachineFunction(MachineFunction &MF) {
SlotIndex MinInd, MaxInd;
for (unsigned I = 0; I < Info->VAddrDwords; ++I) {
const MachineOperand &Op = MI->getOperand(VAddr0Idx + I);
unsigned Reg = Op.getReg();
Register Reg = Op.getReg();
LiveInterval *LI = &LIS->getInterval(Reg);
if (llvm::find(Intervals, LI) != Intervals.end()) {
// Same register used, unable to make sequential
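
The Register::isPhysicalRegister and VRM->getPhys calls above rely on the long-standing encoding of the raw register id, which the Register wrapper preserves. A sketch of that encoding (assumption: this mirrors the top-bit scheme of llvm/CodeGen/Register.h at the time, with 0 reserved as the "no register" sentinel):

#include <cassert>

// Assumed encoding: 0 means "no register", small positive ids name
// physical registers, and ids with the top bit set name virtual registers.
constexpr unsigned VirtualRegFlag = 1u << 31;

constexpr bool isVirtualRegister(unsigned Reg) {
  return (Reg & VirtualRegFlag) != 0;
}

constexpr bool isPhysicalRegister(unsigned Reg) {
  return Reg != 0 && !isVirtualRegister(Reg);
}

int main() {
  unsigned Virt0 = VirtualRegFlag | 0; // virtual register number 0
  assert(isVirtualRegister(Virt0));
  assert(isPhysicalRegister(1)); // e.g. the target's first physical register
  assert(!isPhysicalRegister(0) && !isVirtualRegister(0)); // the sentinel
  return 0;
}
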


@ -364,7 +364,7 @@ unsigned GCNRegBankReassign::analyzeInst(const MachineInstr& MI,
if (!Op.isReg() || Op.isUndef())
continue;
unsigned R = Op.getReg();
Register R = Op.getReg();
if (TRI->hasAGPRs(TRI->getRegClassForReg(*MRI, R)))
continue;
@ -425,7 +425,7 @@ bool GCNRegBankReassign::isReassignable(unsigned Reg) const {
const MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
unsigned PhysReg = VRM->getPhys(Reg);
Register PhysReg = VRM->getPhys(Reg);
if (Def && Def->isCopy() && Def->getOperand(1).getReg() == PhysReg)
return false;
@ -654,7 +654,7 @@ unsigned GCNRegBankReassign::tryReassign(Candidate &C) {
}
std::sort(BankStalls.begin(), BankStalls.end());
unsigned OrigReg = VRM->getPhys(C.Reg);
Register OrigReg = VRM->getPhys(C.Reg);
LRM->unassign(LI);
while (!BankStalls.empty()) {
BankStall BS = BankStalls.pop_back_val();


@ -406,7 +406,7 @@ void GCNDownwardRPTracker::advanceToNext() {
for (const auto &MO : LastTrackedMI->defs()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg))
continue;
auto &LiveMask = LiveRegs[Reg];


@ -303,7 +303,7 @@ private:
if (!MO.isReg())
continue;
if (MO.isDef()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (R600::R600_Reg128RegClass.contains(Reg))
DstMI = Reg;
else
@ -312,7 +312,7 @@ private:
&R600::R600_Reg128RegClass);
}
if (MO.isUse()) {
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (R600::R600_Reg128RegClass.contains(Reg))
SrcMI = Reg;
else


@ -135,7 +135,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
const R600RegisterInfo &TRI = TII->getRegisterInfo();
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
for (unsigned Chan = 0; Chan < 4; ++Chan) {
@ -155,11 +155,11 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
unsigned Opcode = BMI->getOpcode();
// While not strictly necessary from the hardware point of view, we force
// all src operands of a dot4 inst to belong to the same slot.
unsigned Src0 = BMI->getOperand(
TII->getOperandIdx(Opcode, R600::OpName::src0))
Register Src0 =
BMI->getOperand(TII->getOperandIdx(Opcode, R600::OpName::src0))
.getReg();
unsigned Src1 = BMI->getOperand(
TII->getOperandIdx(Opcode, R600::OpName::src1))
Register Src1 =
BMI->getOperand(TII->getOperandIdx(Opcode, R600::OpName::src1))
.getReg();
(void) Src0;
(void) Src1;
@ -205,10 +205,10 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
// T0_Z = CUBE T1_X, T1_Z
// T0_W = CUBE T1_Y, T1_Z
for (unsigned Chan = 0; Chan < 4; Chan++) {
unsigned DstReg = MI.getOperand(
TII->getOperandIdx(MI, R600::OpName::dst)).getReg();
unsigned Src0 = MI.getOperand(
TII->getOperandIdx(MI, R600::OpName::src0)).getReg();
Register DstReg =
MI.getOperand(TII->getOperandIdx(MI, R600::OpName::dst)).getReg();
Register Src0 =
MI.getOperand(TII->getOperandIdx(MI, R600::OpName::src0)).getReg();
unsigned Src1 = 0;
// Determine the correct source registers


@ -334,7 +334,7 @@ R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
}
case R600::MASK_WRITE: {
unsigned maskedRegister = MI.getOperand(0).getReg();
Register maskedRegister = MI.getOperand(0).getReg();
assert(Register::isVirtualRegister(maskedRegister));
MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
TII->addFlag(*defInstr, 0, MO_FLAG_MASK);


@ -293,7 +293,7 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const {
for (unsigned j = 0; j < 8; j++) {
MachineOperand &MO =
MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == R600::ALU_CONST) {
MachineOperand &Sel =
MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
@ -316,7 +316,7 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const {
if (SrcIdx < 0)
break;
MachineOperand &MO = MI.getOperand(SrcIdx);
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (Reg == R600::ALU_CONST) {
MachineOperand &Sel =
MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
@ -347,7 +347,7 @@ R600InstrInfo::ExtractSrcs(MachineInstr &MI,
unsigned i = 0;
for (const auto &Src : getSrcs(MI)) {
++i;
unsigned Reg = Src.first->getReg();
Register Reg = Src.first->getReg();
int Index = RI.getEncodingValue(Reg) & 0xff;
if (Reg == R600::OQAP) {
Result.push_back(std::make_pair(Index, 0U));
@ -864,7 +864,7 @@ bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
if (idx < 0)
return false;
unsigned Reg = MI.getOperand(idx).getReg();
Register Reg = MI.getOperand(idx).getReg();
switch (Reg) {
default: return false;
case R600::PRED_SEL_ONE:
@ -1037,7 +1037,7 @@ bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
unsigned Address = calculateIndirectAddress(RegIndex, Channel);
unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
Register OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
getIndirectAddrRegClass()->getRegister(Address));
@ -1051,7 +1051,7 @@ bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
unsigned Address = calculateIndirectAddress(RegIndex, Channel);
unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
Register OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
MI.getOperand(ValOpIdx).getReg());


@ -270,7 +270,7 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
}
// Is the result already member of a X/Y/Z/W class ?
unsigned DestReg = MI->getOperand(0).getReg();
Register DestReg = MI->getOperand(0).getReg();
if (regBelongsToClass(DestReg, &R600::R600_TReg32_XRegClass) ||
regBelongsToClass(DestReg, &R600::R600_AddrRegClass))
return AluT_X;
@ -357,7 +357,7 @@ void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
if (DstIndex == -1) {
return;
}
unsigned DestReg = MI->getOperand(DstIndex).getReg();
Register DestReg = MI->getOperand(DstIndex).getReg();
// PressureRegister crashes if an operand is both a def and a use in the same inst
// and we try to constrain its regclass
for (MachineInstr::mop_iterator It = MI->operands_begin(),


@ -197,17 +197,17 @@ unsigned getReassignedChan(
MachineInstr *R600VectorRegMerger::RebuildVector(
RegSeqInfo *RSI, const RegSeqInfo *BaseRSI,
const std::vector<std::pair<unsigned, unsigned>> &RemapChan) const {
unsigned Reg = RSI->Instr->getOperand(0).getReg();
Register Reg = RSI->Instr->getOperand(0).getReg();
MachineBasicBlock::iterator Pos = RSI->Instr;
MachineBasicBlock &MBB = *Pos->getParent();
DebugLoc DL = Pos->getDebugLoc();
unsigned SrcVec = BaseRSI->Instr->getOperand(0).getReg();
Register SrcVec = BaseRSI->Instr->getOperand(0).getReg();
DenseMap<unsigned, unsigned> UpdatedRegToChan = BaseRSI->RegToChan;
std::vector<unsigned> UpdatedUndef = BaseRSI->UndefReg;
for (DenseMap<unsigned, unsigned>::iterator It = RSI->RegToChan.begin(),
E = RSI->RegToChan.end(); It != E; ++It) {
unsigned DstReg = MRI->createVirtualRegister(&R600::R600_Reg128RegClass);
Register DstReg = MRI->createVirtualRegister(&R600::R600_Reg128RegClass);
unsigned SubReg = (*It).first;
unsigned Swizzle = (*It).second;
unsigned Chan = getReassignedChan(RemapChan, Swizzle);
@ -350,7 +350,7 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
MachineInstr &MI = *MII;
if (MI.getOpcode() != R600::REG_SEQUENCE) {
if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST) {
unsigned Reg = MI.getOperand(1).getReg();
Register Reg = MI.getOperand(1).getReg();
for (MachineRegisterInfo::def_instr_iterator
It = MRI->def_instr_begin(Reg), E = MRI->def_instr_end();
It != E; ++It) {
@ -363,7 +363,7 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
RegSeqInfo RSI(*MRI, &MI);
// Are all uses of MI swizzeable?
unsigned Reg = MI.getOperand(0).getReg();
Register Reg = MI.getOperand(0).getReg();
if (!areAllUsesSwizzeable(Reg))
continue;
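
UpdatedRegToChan above stays a DenseMap<unsigned, unsigned> even though values such as DstReg are now typed as Register; the implicit conversion also covers interop with unsigned-keyed containers. A small sketch of that property, using std::map in place of llvm::DenseMap to stay self-contained:

#include <cassert>
#include <map>

// One-line stand-in for llvm::Register, as in the earlier sketch.
struct Register {
  unsigned R = 0;
  Register(unsigned R = 0) : R(R) {}
  operator unsigned() const { return R; }
};

int main() {
  std::map<unsigned, unsigned> RegToChan; // legacy unsigned-keyed container
  Register DstReg = 7u;
  RegToChan[DstReg] = 2;           // Register converts to the unsigned key
  assert(RegToChan.count(DstReg)); // and converts again on lookup
  return 0;
}
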


@ -90,7 +90,7 @@ private:
if (DstIdx == -1) {
continue;
}
unsigned Dst = BI->getOperand(DstIdx).getReg();
Register Dst = BI->getOperand(DstIdx).getReg();
if (isTrans || TII->isTransOnly(*BI)) {
Result[Dst] = R600::PS;
continue;
@ -136,7 +136,7 @@ private:
int OperandIdx = TII->getOperandIdx(MI.getOpcode(), Ops[i]);
if (OperandIdx < 0)
continue;
unsigned Src = MI.getOperand(OperandIdx).getReg();
Register Src = MI.getOperand(OperandIdx).getReg();
const DenseMap<unsigned, unsigned>::const_iterator It = PVs.find(Src);
if (It != PVs.end())
MI.getOperand(OperandIdx).setReg(It->second);


@ -129,7 +129,7 @@ bool SIAddIMGInit::runOnMachineFunction(MachineFunction &MF) {
continue;
// Create a register for the initialization value.
unsigned PrevDst =
Register PrevDst =
MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
unsigned NewDst = 0; // Final initialized value will be in here
@ -150,7 +150,7 @@ bool SIAddIMGInit::runOnMachineFunction(MachineFunction &MF) {
NewDst =
MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
// Initialize dword
unsigned SubReg =
Register SubReg =
MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), SubReg)
.addImm(0);


@ -161,8 +161,8 @@ static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
const SIRegisterInfo &TRI,
const MachineRegisterInfo &MRI) {
unsigned DstReg = Copy.getOperand(0).getReg();
unsigned SrcReg = Copy.getOperand(1).getReg();
Register DstReg = Copy.getOperand(0).getReg();
Register SrcReg = Copy.getOperand(1).getReg();
const TargetRegisterClass *SrcRC = Register::isVirtualRegister(SrcReg)
? MRI.getRegClass(SrcReg)
@ -197,8 +197,8 @@ static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
const SIInstrInfo *TII) {
MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
auto &Src = MI.getOperand(1);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = Src.getReg();
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = Src.getReg();
if (!Register::isVirtualRegister(SrcReg) ||
!Register::isVirtualRegister(DstReg))
return false;
@ -236,7 +236,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
MachineRegisterInfo &MRI) {
assert(MI.isRegSequence());
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
return false;
@ -279,7 +279,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
bool IsAGPR = TRI->hasAGPRs(DstRC);
for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
unsigned SrcReg = MI.getOperand(I).getReg();
Register SrcReg = MI.getOperand(I).getReg();
unsigned SrcSubReg = MI.getOperand(I).getSubReg();
const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
@ -289,7 +289,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);
unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);
Register TmpReg = MRI.createVirtualRegister(NewSrcRC);
BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
TmpReg)
@ -297,7 +297,7 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
if (IsAGPR) {
const TargetRegisterClass *NewSrcRC = TRI->getEquivalentAGPRClass(SrcRC);
unsigned TmpAReg = MRI.createVirtualRegister(NewSrcRC);
Register TmpAReg = MRI.createVirtualRegister(NewSrcRC);
unsigned Opc = NewSrcRC == &AMDGPU::AGPR_32RegClass ?
AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::COPY;
BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(Opc),
@ -318,7 +318,7 @@ static bool phiHasVGPROperands(const MachineInstr &PHI,
const SIRegisterInfo *TRI,
const SIInstrInfo *TII) {
for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
unsigned Reg = PHI.getOperand(i).getReg();
Register Reg = PHI.getOperand(i).getReg();
if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
return true;
}
@ -329,7 +329,7 @@ static bool phiHasBreakDef(const MachineInstr &PHI,
const MachineRegisterInfo &MRI,
SmallSet<unsigned, 8> &Visited) {
for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
unsigned Reg = PHI.getOperand(i).getReg();
Register Reg = PHI.getOperand(i).getReg();
if (Visited.count(Reg))
continue;
@ -641,7 +641,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
}
if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
unsigned SrcReg = MI.getOperand(1).getReg();
Register SrcReg = MI.getOperand(1).getReg();
if (!Register::isVirtualRegister(SrcReg)) {
TII->moveToVALU(MI, MDT);
break;
@ -666,7 +666,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
break;
}
case AMDGPU::PHI: {
unsigned Reg = MI.getOperand(0).getReg();
Register Reg = MI.getOperand(0).getReg();
if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
break;


@ -248,7 +248,7 @@ static bool updateOperand(FoldCandidate &Fold,
bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);
Register NewReg0 = MRI.createVirtualRegister(Dst0RC);
MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
@ -443,7 +443,7 @@ static bool tryToFoldACImm(const SIInstrInfo *TII,
if (!OpToFold.isReg())
return false;
unsigned UseReg = OpToFold.getReg();
Register UseReg = OpToFold.getReg();
if (!Register::isVirtualRegister(UseReg))
return false;
@ -518,7 +518,7 @@ void SIFoldOperands::foldOperand(
// REG_SEQUENCE instructions, so we have to fold them into the
// uses of REG_SEQUENCE.
if (UseMI->isRegSequence()) {
unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
Register RegSeqDstReg = UseMI->getOperand(0).getReg();
unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
MachineRegisterInfo::use_iterator Next;
@ -569,12 +569,12 @@ void SIFoldOperands::foldOperand(
OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
if (FoldingImmLike && UseMI->isCopy()) {
unsigned DestReg = UseMI->getOperand(0).getReg();
Register DestReg = UseMI->getOperand(0).getReg();
const TargetRegisterClass *DestRC = Register::isVirtualRegister(DestReg)
? MRI->getRegClass(DestReg)
: TRI->getPhysRegClass(DestReg);
unsigned SrcReg = UseMI->getOperand(1).getReg();
Register SrcReg = UseMI->getOperand(1).getReg();
if (Register::isVirtualRegister(DestReg) &&
Register::isVirtualRegister(SrcReg)) {
const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
@ -710,7 +710,7 @@ void SIFoldOperands::foldOperand(
// Split 64-bit constants into 32-bit pieces for folding.
if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
unsigned UseReg = UseOp.getReg();
Register UseReg = UseOp.getReg();
const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)


@ -120,7 +120,7 @@ static bool isValidClauseInst(const MachineInstr &MI, bool IsVMEMClause) {
return false;
// If this is a load instruction where the result has been coalesced with an operand, then we cannot clause it.
for (const MachineOperand &ResMO : MI.defs()) {
unsigned ResReg = ResMO.getReg();
Register ResReg = ResMO.getReg();
for (const MachineOperand &MO : MI.uses()) {
if (!MO.isReg() || MO.isDef())
continue;
@ -216,7 +216,7 @@ bool SIFormMemoryClauses::canBundle(const MachineInstr &MI,
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
// If it is tied we will need to write same register as we read.
if (MO.isTied())
@ -265,7 +265,7 @@ void SIFormMemoryClauses::collectRegUses(const MachineInstr &MI,
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Reg)
continue;


@ -202,15 +202,15 @@ void SIFrameLowering::emitFlatScratchInit(const GCNSubtarget &ST,
DebugLoc DL;
MachineBasicBlock::iterator I = MBB.begin();
unsigned FlatScratchInitReg
= MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
Register FlatScratchInitReg =
MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
MachineRegisterInfo &MRI = MF.getRegInfo();
MRI.addLiveIn(FlatScratchInitReg);
MBB.addLiveIn(FlatScratchInitReg);
unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
Register FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
Register FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
@ -424,7 +424,7 @@ void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);
// We need to insert initialization of the scratch resource descriptor.
unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
@ -539,9 +539,9 @@ void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
if (ST.isAmdPalOS()) {
// The pointer to the GIT is formed from the offset passed in and either
// the amdgpu-git-ptr-high function attribute or the top part of the PC
unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
Register RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
Register RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
@ -601,14 +601,14 @@ void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
assert(!ST.isAmdHsaOrMesa(Fn));
const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);
Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);
// Use relocations to get the pointer, and set up the other bits manually.
uint64_t Rsrc23 = TII->getScratchRsrcWords23();
if (MFI->hasImplicitBufferPtr()) {
unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);
@ -640,8 +640,8 @@ void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
}
} else {
unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
BuildMI(MBB, I, DL, SMovB32, Rsrc0)
.addExternalSymbol("SCRATCH_RSRC_DWORD0")
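
RsrcLo, RsrcHi, and Rsrc01 above are physical registers, and for purely physical ids LLVM also has the narrower MCRegister wrapper. A rough sketch of how the two relate (an assumption-level simplification, not the real definitions in llvm/MC/MCRegister.h and llvm/CodeGen/Register.h):

#include <cassert>

// Sketch: MCRegister may only name a physical register (or none), while
// Register may name a physical or a virtual one; both wrap an unsigned id.
constexpr unsigned VirtualRegFlag = 1u << 31;

struct MCRegister {
  unsigned Id = 0;
  MCRegister(unsigned Id = 0) : Id(Id) {}
  operator unsigned() const { return Id; }
};

struct Register {
  unsigned Id = 0;
  Register(unsigned Id = 0) : Id(Id) {}
  operator unsigned() const { return Id; }
  bool isPhysical() const { return Id != 0 && !(Id & VirtualRegFlag); }
  // Narrowing is only meaningful for physical registers (hypothetical
  // helper name; the point is the checked direction of the conversion).
  MCRegister asMCReg() const {
    assert(isPhysical() && "not a physical register");
    return MCRegister(Id);
  }
};

int main() {
  Register RsrcLo = 5u; // a made-up physical register id
  assert(unsigned(RsrcLo.asMCReg()) == 5);
  return 0;
}
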


@ -1883,7 +1883,7 @@ static void reservePrivateMemoryRegs(const TargetMachine &TM,
// resource. For the Code Object V2 ABI, this will be the first 4 user
// SGPR inputs. We can reserve those and use them directly.
unsigned PrivateSegmentBufferReg =
Register PrivateSegmentBufferReg =
Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
Info.setScratchRSrcReg(PrivateSegmentBufferReg);
} else {
@ -1944,7 +1944,7 @@ static void reservePrivateMemoryRegs(const TargetMachine &TM,
//
// FIXME: Should not do this if inline asm is reading/writing these
// registers.
unsigned PreloadedSP = Info.getPreloadedReg(
Register PreloadedSP = Info.getPreloadedReg(
AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
Info.setStackPtrOffsetReg(PreloadedSP);
@ -1994,7 +1994,7 @@ void SITargetLowering::insertCopiesSplitCSR(
else
llvm_unreachable("Unexpected register class in CSRsViaCopy!");
unsigned NewVR = MRI->createVirtualRegister(RC);
Register NewVR = MRI->createVirtualRegister(RC);
// Create copy from CSR to a virtual register.
Entry->addLiveIn(*I);
BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
@ -2157,7 +2157,7 @@ SDValue SITargetLowering::LowerFormalArguments(
assert(VA.isRegLoc() && "Parameter must be in a register!");
unsigned Reg = VA.getLocReg();
Register Reg = VA.getLocReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
EVT ValVT = VA.getValVT();
@ -3121,7 +3121,7 @@ SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
bundleInstWithWaitcnt(MI);
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
// Load and check TRAP_STS.MEM_VIOL
BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg)
@ -3162,10 +3162,10 @@ static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
MachineBasicBlock::iterator I = LoopBB.begin();
const TargetRegisterClass *BoolRC = TRI->getBoolRC();
unsigned PhiExec = MRI.createVirtualRegister(BoolRC);
unsigned NewExec = MRI.createVirtualRegister(BoolRC);
unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned CondReg = MRI.createVirtualRegister(BoolRC);
Register PhiExec = MRI.createVirtualRegister(BoolRC);
Register NewExec = MRI.createVirtualRegister(BoolRC);
Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register CondReg = MRI.createVirtualRegister(BoolRC);
BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
.addReg(InitReg)
@ -3264,9 +3264,9 @@ static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
MachineBasicBlock::iterator I(&MI);
const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
unsigned TmpExec = MRI.createVirtualRegister(BoolXExecRC);
Register DstReg = MI.getOperand(0).getReg();
Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
Register TmpExec = MRI.createVirtualRegister(BoolXExecRC);
unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
@ -3339,7 +3339,7 @@ static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
SetOn->getOperand(3).setIsUndef();
} else {
unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
.add(*Idx)
.addImm(Offset);
@ -3375,8 +3375,8 @@ static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
MachineFunction *MF = MBB.getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned Dst = MI.getOperand(0).getReg();
unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
Register Dst = MI.getOperand(0).getReg();
Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
@ -3414,8 +3414,8 @@ static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
const DebugLoc &DL = MI.getDebugLoc();
MachineBasicBlock::iterator I(&MI);
unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
@ -3466,7 +3466,7 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
MachineFunction *MF = MBB.getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned Dst = MI.getOperand(0).getReg();
Register Dst = MI.getOperand(0).getReg();
const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
@ -3529,7 +3529,7 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
const DebugLoc &DL = MI.getDebugLoc();
unsigned PhiReg = MRI.createVirtualRegister(VecRC);
Register PhiReg = MRI.createVirtualRegister(VecRC);
auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
Offset, UseGPRIdxMode, false);
@ -3588,8 +3588,8 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
MachineOperand &Src0 = MI.getOperand(1);
MachineOperand &Src1 = MI.getOperand(2);
unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
Src0, BoolRC, AMDGPU::sub0,
@ -3656,8 +3656,8 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
// S_CMOV_B64 exec, -1
MachineInstr *FirstMI = &*BB->begin();
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned InputReg = MI.getOperand(0).getReg();
unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register InputReg = MI.getOperand(0).getReg();
Register CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
bool Found = false;
// Move the COPY of the input reg to the beginning, so that we can use it.
@ -3731,16 +3731,16 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
unsigned Dst = MI.getOperand(0).getReg();
unsigned Src0 = MI.getOperand(1).getReg();
unsigned Src1 = MI.getOperand(2).getReg();
Register Dst = MI.getOperand(0).getReg();
Register Src0 = MI.getOperand(1).getReg();
Register Src1 = MI.getOperand(2).getReg();
const DebugLoc &DL = MI.getDebugLoc();
unsigned SrcCond = MI.getOperand(3).getReg();
Register SrcCond = MI.getOperand(3).getReg();
unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned SrcCondCopy = MRI.createVirtualRegister(CondRC);
Register SrcCondCopy = MRI.createVirtualRegister(CondRC);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
.addReg(SrcCond);
@ -10377,7 +10377,7 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
Node->use_begin()->isMachineOpcode() &&
Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
!Node->use_begin()->hasAnyUseOfValue(0))) {
unsigned Def = MI.getOperand(0).getReg();
Register Def = MI.getOperand(0).getReg();
// Change this into a noret atomic.
MI.setDesc(TII->get(NoRetAtomicOp));


@ -458,7 +458,7 @@ bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
const MachineRegisterInfo &MRI =
FirstLdSt.getParent()->getParent()->getRegInfo();
const unsigned Reg = FirstDst->getReg();
const Register Reg = FirstDst->getReg();
const TargetRegisterClass *DstRC = Register::isVirtualRegister(Reg)
? MRI.getRegClass(Reg)
@ -807,7 +807,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
"Not a VGPR32 reg");
if (Cond.size() == 1) {
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
Register SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
.add(Cond[0]);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -820,7 +820,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
assert(Cond[0].isImm() && "Cond[0] is not an immediate");
switch (Cond[0].getImm()) {
case SIInstrInfo::SCC_TRUE: {
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
Register SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
: AMDGPU::S_CSELECT_B64), SReg)
.addImm(-1)
@ -834,7 +834,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
break;
}
case SIInstrInfo::SCC_FALSE: {
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
Register SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
: AMDGPU::S_CSELECT_B64), SReg)
.addImm(0)
@ -850,7 +850,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
case SIInstrInfo::VCCNZ: {
MachineOperand RegOp = Cond[1];
RegOp.setImplicit(false);
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
Register SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
.add(RegOp);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -864,7 +864,7 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
case SIInstrInfo::VCCZ: {
MachineOperand RegOp = Cond[1];
RegOp.setImplicit(false);
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
Register SReg = MRI.createVirtualRegister(BoolXExecRC);
BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
.add(RegOp);
BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
@ -876,8 +876,8 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
break;
}
case SIInstrInfo::EXECNZ: {
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
unsigned SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
Register SReg = MRI.createVirtualRegister(BoolXExecRC);
Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
: AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
.addImm(0);
@ -894,8 +894,8 @@ void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
break;
}
case SIInstrInfo::EXECZ: {
unsigned SReg = MRI.createVirtualRegister(BoolXExecRC);
unsigned SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
Register SReg = MRI.createVirtualRegister(BoolXExecRC);
Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
: AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
.addImm(0);
@ -925,7 +925,7 @@ unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
const DebugLoc &DL,
unsigned SrcReg, int Value) const {
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
unsigned Reg = MRI.createVirtualRegister(RI.getBoolRC());
Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
.addImm(Value)
.addReg(SrcReg);
@ -938,7 +938,7 @@ unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB,
const DebugLoc &DL,
unsigned SrcReg, int Value) const {
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
unsigned Reg = MRI.createVirtualRegister(RI.getBoolRC());
Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
.addImm(Value)
.addReg(SrcReg);
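
insertEQ and insertNE are still declared to return unsigned while their Reg locals are now Registers; the value converts implicitly on the way out, so signatures can migrate one function at a time. A compressed sketch of that shape (the helper below is hypothetical, not an LLVM function):

// One-line stand-in for llvm::Register, as in the earlier sketches.
struct Register {
  unsigned R = 0;
  Register(unsigned R = 0) : R(R) {}
  operator unsigned() const { return R; }
};

// The declared return type is still unsigned, but the body can already
// traffic in Register; both the arithmetic and the return statement go
// through the implicit conversion.
unsigned makeCompareResultReg() {
  Register Reg = 41u; // stands in for MRI.createVirtualRegister(...)
  return Reg + 1;
}

int main() { return makeCompareResultReg() == 42 ? 0 : 1; }
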
@ -1083,7 +1083,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
auto MIB = BuildMI(MBB, MI, DL, get(Opcode));
if (RI.hasAGPRs(RC)) {
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
MIB.addReg(Tmp, RegState::Define);
}
MIB.addReg(SrcReg, getKillRegState(isKill)) // data
@ -1208,7 +1208,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
auto MIB = BuildMI(MBB, MI, DL, get(Opcode), DestReg);
if (RI.hasAGPRs(RC)) {
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
MIB.addReg(Tmp, RegState::Define);
}
MIB.addFrameIndex(FrameIndex) // vaddr
@ -1242,13 +1242,13 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(
if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
WorkGroupSize > WavefrontSize) {
unsigned TIDIGXReg
= MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
unsigned TIDIGYReg
= MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
unsigned TIDIGZReg
= MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
unsigned InputPtrReg =
Register TIDIGXReg =
MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
Register TIDIGYReg =
MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
Register TIDIGZReg =
MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
Register InputPtrReg =
MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
if (!Entry.isLiveIn(Reg))
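
The range-for just above runs three Register values through an unsigned loop variable: the braced list deduces std::initializer_list<Register> and each element converts as it is bound. A sketch using the same stand-in Register as before:

#include <cassert>
#include <initializer_list>

struct Register {
  unsigned R = 0;
  Register(unsigned R = 0) : R(R) {}
  operator unsigned() const { return R; }
};

int main() {
  Register X = 1u, Y = 2u, Z = 3u;
  unsigned Sum = 0;
  // {X, Y, Z} deduces std::initializer_list<Register>; each element then
  // converts to the unsigned loop variable, mirroring the loop above.
  for (unsigned Reg : {X, Y, Z})
    Sum += Reg;
  assert(Sum == 6);
  return 0;
}
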
@ -1416,9 +1416,9 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
break;
case AMDGPU::V_MOV_B64_PSEUDO: {
unsigned Dst = MI.getOperand(0).getReg();
unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
Register Dst = MI.getOperand(0).getReg();
Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
const MachineOperand &SrcOp = MI.getOperand(1);
// FIXME: Will this work for 64-bit floating point immediates?
@ -1475,7 +1475,7 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
case AMDGPU::V_MOVRELD_B32_V8:
case AMDGPU::V_MOVRELD_B32_V16: {
const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
unsigned VecReg = MI.getOperand(0).getReg();
Register VecReg = MI.getOperand(0).getReg();
bool IsUndef = MI.getOperand(1).isUndef();
unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
assert(VecReg == MI.getOperand(1).getReg());
@ -1498,9 +1498,9 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
}
case AMDGPU::SI_PC_ADD_REL_OFFSET: {
MachineFunction &MF = *MBB.getParent();
unsigned Reg = MI.getOperand(0).getReg();
unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
Register Reg = MI.getOperand(0).getReg();
Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
// Create a bundle so these instructions won't be re-ordered by the
// post-RA scheduler.
@ -1580,7 +1580,7 @@ bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
MachineOperand &RegOp,
MachineOperand &NonRegOp) {
unsigned Reg = RegOp.getReg();
Register Reg = RegOp.getReg();
unsigned SubReg = RegOp.getSubReg();
bool IsKill = RegOp.isKill();
bool IsDead = RegOp.isDead();
@ -1716,7 +1716,7 @@ unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
// FIXME: Virtual register workaround for RegScavenger not working with empty
// blocks.
unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
auto I = MBB.end();
@ -2170,7 +2170,7 @@ void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
SmallVector<unsigned, 8> Regs;
for (int Idx = 0; Idx != NElts; ++Idx) {
unsigned DstElt = MRI.createVirtualRegister(EltRC);
Register DstElt = MRI.createVirtualRegister(EltRC);
Regs.push_back(DstElt);
unsigned SubIdx = SubIndices[Idx];
@ -2334,7 +2334,7 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
UseMI.RemoveOperand(
AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
unsigned Src1Reg = Src1->getReg();
Register Src1Reg = Src1->getReg();
unsigned Src1SubReg = Src1->getSubReg();
Src0->setReg(Src1Reg);
Src0->setSubReg(Src1SubReg);
@ -3152,7 +3152,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
if (!Op.isReg())
continue;
unsigned Reg = Op.getReg();
Register Reg = Op.getReg();
if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) {
ErrInfo = "inlineasm operand has incorrect register class.";
return false;
@ -3217,7 +3217,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
continue;
if (RegClass != -1) {
unsigned Reg = MI.getOperand(i).getReg();
Register Reg = MI.getOperand(i).getReg();
if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg))
continue;
@ -3716,7 +3716,7 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
const MCInstrDesc &Desc = get(MI.getOpcode());
if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
Desc.OpInfo[OpNo].RegClass == -1) {
unsigned Reg = MI.getOperand(OpNo).getReg();
Register Reg = MI.getOperand(OpNo).getReg();
if (Register::isVirtualRegister(Reg))
return MRI.getRegClass(Reg);
@ -3749,7 +3749,7 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
else
VRC = &AMDGPU::VGPR_32RegClass;
unsigned Reg = MRI.createVirtualRegister(VRC);
Register Reg = MRI.createVirtualRegister(VRC);
DebugLoc DL = MBB->findDebugLoc(I);
BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
MO.ChangeToRegister(Reg, false);
@ -3764,7 +3764,7 @@ unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
const {
MachineBasicBlock *MBB = MI->getParent();
DebugLoc DL = MI->getDebugLoc();
unsigned SubReg = MRI.createVirtualRegister(SubRC);
Register SubReg = MRI.createVirtualRegister(SubRC);
if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
@ -3776,7 +3776,7 @@ unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
// value so we don't need to worry about merging its subreg index with the
// SubIdx passed to this function. The register coalescer should be able to
// eliminate this extra copy.
unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
Register NewSuperReg = MRI.createVirtualRegister(SuperRC);
BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
.addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
@ -3822,7 +3822,7 @@ bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
if (!MO.isReg())
return false;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
const TargetRegisterClass *RC = Register::isVirtualRegister(Reg)
? MRI.getRegClass(Reg)
: RI.getPhysRegClass(Reg);
@ -3942,13 +3942,13 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
if (Opc == AMDGPU::V_WRITELANE_B32) {
const DebugLoc &DL = MI.getDebugLoc();
if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
.add(Src0);
Src0.ChangeToRegister(Reg, false);
}
if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
const DebugLoc &DL = MI.getDebugLoc();
BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
.add(Src1);
@ -3974,7 +3974,7 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
// select is uniform.
if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
RI.isVGPR(MRI, Src1.getReg())) {
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
const DebugLoc &DL = MI.getDebugLoc();
BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
.add(Src1);
@ -4010,7 +4010,7 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
MI.setDesc(get(CommutedOpc));
unsigned Src0Reg = Src0.getReg();
Register Src0Reg = Src0.getReg();
unsigned Src0SubReg = Src0.getSubReg();
bool Src0Kill = Src0.isKill();
@ -4046,13 +4046,13 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]);
const DebugLoc &DL = MI.getDebugLoc();
if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
.add(Src1);
Src1.ChangeToRegister(Reg, false);
}
if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
.add(Src2);
Src2.ChangeToRegister(Reg, false);
@ -4120,12 +4120,12 @@ unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
MachineRegisterInfo &MRI) const {
const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
unsigned DstReg = MRI.createVirtualRegister(SRC);
Register DstReg = MRI.createVirtualRegister(SRC);
unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
if (RI.hasAGPRs(VRC)) {
VRC = RI.getEquivalentVGPRClass(VRC);
unsigned NewSrcReg = MRI.createVirtualRegister(VRC);
Register NewSrcReg = MRI.createVirtualRegister(VRC);
BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
get(TargetOpcode::COPY), NewSrcReg)
.addReg(SrcReg);
@ -4141,7 +4141,7 @@ unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
SmallVector<unsigned, 8> SRegs;
for (unsigned i = 0; i < SubRegs; ++i) {
unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
.addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
@ -4183,7 +4183,7 @@ void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
MachineOperand &Op,
MachineRegisterInfo &MRI,
const DebugLoc &DL) const {
unsigned OpReg = Op.getReg();
Register OpReg = Op.getReg();
unsigned OpSubReg = Op.getSubReg();
const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
@ -4193,7 +4193,7 @@ void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
if (DstRC == OpRC)
return;
unsigned DstReg = MRI.createVirtualRegister(DstRC);
Register DstReg = MRI.createVirtualRegister(DstRC);
MachineInstr *Copy =
BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
@ -4230,18 +4230,18 @@ emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
MachineBasicBlock::iterator I = LoopBB.begin();
unsigned VRsrc = Rsrc.getReg();
Register VRsrc = Rsrc.getReg();
unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
unsigned CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
unsigned CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
unsigned AndCond = MRI.createVirtualRegister(BoolXExecRC);
unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
Register AndCond = MRI.createVirtualRegister(BoolXExecRC);
Register SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
// Beginning of the loop, read the next Rsrc variant.
BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0)
@ -4309,7 +4309,7 @@ static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
// Save the EXEC mask
BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);
@ -4377,10 +4377,10 @@ extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
// Create an empty resource descriptor
unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
// Zero64 = 0
@ -4509,8 +4509,8 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
// Legalize INSERT_SUBREG
// src0 must have the same register class as dst
if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
unsigned Dst = MI.getOperand(0).getReg();
unsigned Src0 = MI.getOperand(1).getReg();
Register Dst = MI.getOperand(0).getReg();
Register Src0 = MI.getOperand(1).getReg();
const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
if (DstRC != Src0RC) {
@ -4584,13 +4584,13 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
// This is already an ADDR64 instruction so we need to add the pointer
// extracted from the resource descriptor to the current value of VAddr.
unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
unsigned CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
unsigned RsrcPtr, NewSRsrc;
std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
@ -4630,7 +4630,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI,
unsigned RsrcPtr, NewSRsrc;
std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
@ -4940,7 +4940,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
unsigned NewDstReg = AMDGPU::NoRegister;
if (HasDst) {
unsigned DstReg = Inst.getOperand(0).getReg();
Register DstReg = Inst.getOperand(0).getReg();
if (Register::isPhysicalRegister(DstReg))
continue;
@ -4995,8 +4995,8 @@ bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
unsigned OldDstReg = Inst.getOperand(0).getReg();
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register OldDstReg = Inst.getOperand(0).getReg();
Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned Opc = Inst.getOpcode();
assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
@ -5029,8 +5029,8 @@ void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
MachineOperand &Dest = Inst.getOperand(0);
MachineOperand &Src = Inst.getOperand(1);
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned SubOp = ST.hasAddNoCarry() ?
AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32;
@ -5059,7 +5059,7 @@ void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
MachineOperand &Src1 = Inst.getOperand(2);
if (ST.hasDLInsts()) {
unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);
@@ -5079,8 +5079,8 @@ void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
bool Src1IsSGPR = Src1.isReg() &&
RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
MachineInstr *Xor;
unsigned Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
// Build a pair of scalar instructions and add them to the work list.
// The next iteration over the work list will lower these to the vector
@@ -5124,8 +5124,8 @@ void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist,
MachineOperand &Src0 = Inst.getOperand(1);
MachineOperand &Src1 = Inst.getOperand(2);
unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm)
.add(Src0)
@@ -5153,8 +5153,8 @@ void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist,
MachineOperand &Src0 = Inst.getOperand(1);
MachineOperand &Src1 = Inst.getOperand(2);
unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm)
.add(Src1);
@@ -5196,16 +5196,16 @@ void SIInstrInfo::splitScalar64BitUnaryOp(
const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);
MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
AMDGPU::sub1, Src0SubRC);
unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);
unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
.addReg(DestSub0)
.addImm(AMDGPU::sub0)
@@ -5233,12 +5233,12 @@ void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist,
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned CarryReg = MRI.createVirtualRegister(CarryRC);
unsigned DeadCarryReg = MRI.createVirtualRegister(CarryRC);
Register CarryReg = MRI.createVirtualRegister(CarryRC);
Register DeadCarryReg = MRI.createVirtualRegister(CarryRC);
MachineOperand &Dest = Inst.getOperand(0);
MachineOperand &Src0 = Inst.getOperand(1);
@@ -5334,17 +5334,17 @@ void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
.add(SrcReg0Sub0)
.add(SrcReg1Sub0);
unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
.add(SrcReg0Sub1)
.add(SrcReg1Sub1);
unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
.addReg(DestSub0)
.addImm(AMDGPU::sub0)
@@ -5375,7 +5375,7 @@ void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
MachineOperand* Op0;
MachineOperand* Op1;
@@ -5391,7 +5391,7 @@ void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
.add(*Op0);
unsigned NewDest = MRI.createVirtualRegister(DestRC);
Register NewDest = MRI.createVirtualRegister(DestRC);
MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
.addReg(Interm)
@@ -5418,8 +5418,8 @@ void SIInstrInfo::splitScalar64BitBCNT(
MRI.getRegClass(Src.getReg()) :
&AMDGPU::SGPR_32RegClass;
unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
@@ -5458,9 +5458,9 @@ void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
Offset == 0 && "Not implemented");
if (BitWidth < 32) {
unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
.addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
@@ -5483,8 +5483,8 @@ void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
}
MachineOperand &Src = Inst.getOperand(1);
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
.addImm(31)
@@ -5539,7 +5539,7 @@ void SIInstrInfo::addUsersToMoveToVALUWorklist(
void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
MachineRegisterInfo &MRI,
MachineInstr &Inst) const {
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
MachineBasicBlock *MBB = Inst.getParent();
MachineOperand &Src0 = Inst.getOperand(1);
MachineOperand &Src1 = Inst.getOperand(2);
@@ -5547,8 +5547,8 @@ void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
switch (Inst.getOpcode()) {
case AMDGPU::S_PACK_LL_B32_B16: {
unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
// FIXME: Can do a lot better if we know the high bits of src0 or src1 are
// 0.
@@ -5566,7 +5566,7 @@ void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
break;
}
case AMDGPU::S_PACK_LH_B32_B16: {
unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
.addImm(0xffff);
BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg)
@@ -5576,8 +5576,8 @@ void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
break;
}
case AMDGPU::S_PACK_HH_B32_B16: {
unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
.addImm(16)
.add(Src0);
@@ -5695,7 +5695,7 @@ unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
return MO.getReg();
// If this could be a VGPR or an SGPR, Check the dynamic register class.
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
if (RI.isSGPRClass(RegRC))
UsedSGPRs[i] = Reg;
@@ -5950,7 +5950,7 @@ void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();
if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
unsigned DstReg = MRI.createVirtualRegister(RI.getBoolRC());
Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
MachineInstr *SIIF =
BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
.add(Branch->getOperand(0))
@@ -5977,8 +5977,8 @@ void SIInstrInfo::convertNonUniformLoopRegion(
if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
unsigned DstReg = MRI.createVirtualRegister(RI.getBoolRC());
unsigned BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
MachineInstrBuilder HeaderPHIBuilder =
BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
@@ -5988,7 +5988,7 @@ void SIInstrInfo::convertNonUniformLoopRegion(
HeaderPHIBuilder.addReg(BackEdgeReg);
} else {
MachineBasicBlock *PMBB = *PI;
unsigned ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
ZeroReg, 0);
HeaderPHIBuilder.addReg(ZeroReg);
@@ -6072,7 +6072,7 @@ SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
unsigned UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());
return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg)

llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp

@@ -725,15 +725,15 @@ SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
const TargetRegisterClass *SuperRC =
(CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
unsigned DestReg = MRI->createVirtualRegister(SuperRC);
Register DestReg = MRI->createVirtualRegister(SuperRC);
DebugLoc DL = CI.I->getDebugLoc();
unsigned BaseReg = AddrReg->getReg();
Register BaseReg = AddrReg->getReg();
unsigned BaseSubReg = AddrReg->getSubReg();
unsigned BaseRegFlags = 0;
if (CI.BaseOff) {
unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
.addImm(CI.BaseOff);
@@ -823,11 +823,11 @@ SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
const MCInstrDesc &Write2Desc = TII->get(Opc);
DebugLoc DL = CI.I->getDebugLoc();
unsigned BaseReg = AddrReg->getReg();
Register BaseReg = AddrReg->getReg();
unsigned BaseSubReg = AddrReg->getSubReg();
unsigned BaseRegFlags = 0;
if (CI.BaseOff) {
unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
.addImm(CI.BaseOff);
@@ -869,7 +869,7 @@ SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
unsigned DestReg = MRI->createVirtualRegister(SuperRC);
Register DestReg = MRI->createVirtualRegister(SuperRC);
unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);
// It shouldn't be possible to get this far if the two instructions
@@ -921,7 +921,7 @@ SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
// Copy to the new source register.
unsigned DestReg = MRI->createVirtualRegister(SuperRC);
Register DestReg = MRI->createVirtualRegister(SuperRC);
unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);
auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);
@@ -1103,7 +1103,7 @@ SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
// Copy to the new source register.
const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
unsigned SrcReg = MRI->createVirtualRegister(SuperRC);
Register SrcReg = MRI->createVirtualRegister(SuperRC);
const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);
@@ -1154,7 +1154,7 @@ SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) {
if (TII->isInlineConstant(V))
return MachineOperand::CreateImm(Val);
unsigned Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
MachineInstr *Mov =
BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
TII->get(AMDGPU::S_MOV_B32), Reg)
@@ -1185,11 +1185,11 @@ unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
unsigned CarryReg = MRI->createVirtualRegister(CarryRC);
unsigned DeadCarryReg = MRI->createVirtualRegister(CarryRC);
Register CarryReg = MRI->createVirtualRegister(CarryRC);
Register DeadCarryReg = MRI->createVirtualRegister(CarryRC);
unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
MachineInstr *LoHalf =
BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
.addReg(CarryReg, RegState::Define)
@@ -1209,7 +1209,7 @@ unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
(void)HiHalf;
LLVM_DEBUG(dbgs() << " "; HiHalf->dump(););
unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
Register FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
MachineInstr *FullBase =
BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
.addReg(DestSub0)

llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp

@@ -149,7 +149,7 @@ char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;
static bool isSimpleIf(const MachineInstr &MI, const MachineRegisterInfo *MRI,
const SIInstrInfo *TII) {
unsigned SaveExecReg = MI.getOperand(0).getReg();
Register SaveExecReg = MI.getOperand(0).getReg();
auto U = MRI->use_instr_nodbg_begin(SaveExecReg);
if (U == MRI->use_instr_nodbg_end() ||
@@ -209,7 +209,7 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
.addReg(Exec)
.addReg(Exec, RegState::ImplicitDefine);
unsigned Tmp = MRI->createVirtualRegister(BoolRC);
Register Tmp = MRI->createVirtualRegister(BoolRC);
MachineInstr *And =
BuildMI(MBB, I, DL, TII->get(AndOpc), Tmp)
@@ -546,7 +546,7 @@ void SILowerControlFlow::combineMasks(MachineInstr &MI) {
else if (Ops[1].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
else return;
unsigned Reg = MI.getOperand(OpToReplace).getReg();
Register Reg = MI.getOperand(OpToReplace).getReg();
MI.RemoveOperand(OpToReplace);
MI.addOperand(Ops[UniqueOpndIdx]);
if (MRI->use_empty(Reg))

llvm/lib/Target/AMDGPU/SILowerI1Copies.cpp

@@ -497,8 +497,8 @@ void SILowerI1Copies::lowerCopiesFromI1() {
if (MI.getOpcode() != AMDGPU::COPY)
continue;
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
if (!isVreg1(SrcReg))
continue;
@@ -544,7 +544,7 @@ void SILowerI1Copies::lowerPhis() {
LF.initialize(MBB);
for (MachineInstr &MI : MBB.phis()) {
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
if (!isVreg1(DstReg))
continue;
@@ -556,7 +556,7 @@ void SILowerI1Copies::lowerPhis() {
// Collect incoming values.
for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
assert(i + 1 < MI.getNumOperands());
unsigned IncomingReg = MI.getOperand(i).getReg();
Register IncomingReg = MI.getOperand(i).getReg();
MachineBasicBlock *IncomingMBB = MI.getOperand(i + 1).getMBB();
MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);
@@ -669,7 +669,7 @@ void SILowerI1Copies::lowerCopiesToI1() {
MI.getOpcode() != AMDGPU::COPY)
continue;
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
if (!isVreg1(DstReg))
continue;
@@ -686,7 +686,7 @@ void SILowerI1Copies::lowerCopiesToI1() {
continue;
DebugLoc DL = MI.getDebugLoc();
unsigned SrcReg = MI.getOperand(1).getReg();
Register SrcReg = MI.getOperand(1).getReg();
assert(!MI.getOperand(1).getSubReg());
if (!Register::isVirtualRegister(SrcReg) ||

llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp

@@ -278,8 +278,8 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
unsigned FIOp = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
AMDGPU::OpName::vaddr);
int FI = MI.getOperand(FIOp).getIndex();
unsigned VReg = TII->getNamedOperand(MI, AMDGPU::OpName::vdata)
->getReg();
Register VReg =
TII->getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
if (FuncInfo->allocateVGPRSpillToAGPR(MF, FI,
TRI->isAGPR(MRI, VReg))) {
TRI->eliminateFrameIndex(MI, 0, FIOp, nullptr);

llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp

@@ -328,7 +328,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
continue;
}
unsigned CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
Register CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
MachineInstr *SaveExecInst = nullptr;
SmallVector<MachineInstr *, 4> OtherUseInsts;

llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp

@@ -211,7 +211,7 @@ static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
return AMDGPU::NoRegister;
MachineOperand *AndCC = &And->getOperand(1);
unsigned CmpReg = AndCC->getReg();
Register CmpReg = AndCC->getReg();
unsigned CmpSubReg = AndCC->getSubReg();
if (CmpReg == ExecReg) {
AndCC = &And->getOperand(2);
@@ -234,7 +234,7 @@ static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
return AMDGPU::NoRegister;
unsigned SelReg = Op1->getReg();
Register SelReg = Op1->getReg();
auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS);
if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
return AMDGPU::NoRegister;
@@ -253,7 +253,7 @@ static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t'
<< *Cmp << '\t' << *And);
unsigned CCReg = CC->getReg();
Register CCReg = CC->getReg();
LIS->RemoveMachineInstrFromMaps(*And);
MachineInstr *Andn2 = BuildMI(MBB, *And, And->getDebugLoc(),
TII->get(Andn2Opc), And->getOperand(0).getReg())
@@ -412,7 +412,7 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
if (!SaveExec || !SaveExec->isFullCopy())
continue;
unsigned SavedExec = SaveExec->getOperand(0).getReg();
Register SavedExec = SaveExec->getOperand(0).getReg();
bool SafeToReplace = true;
for (auto& U : MRI.use_nodbg_instructions(SavedExec)) {
if (U.getParent() != SaveExec->getParent()) {

llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp

@@ -1189,7 +1189,7 @@ void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
continue;
}
unsigned VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
if (Op.isImm())

llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp

@@ -90,7 +90,7 @@ bool SIPreAllocateWWMRegs::processDef(MachineOperand &MO) {
if (!MO.isReg())
return false;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!TRI->isVGPR(*MRI, Reg))
return false;
@@ -124,14 +124,14 @@ void SIPreAllocateWWMRegs::rewriteRegs(MachineFunction &MF) {
if (!MO.isReg())
continue;
const unsigned VirtReg = MO.getReg();
const Register VirtReg = MO.getReg();
if (Register::isPhysicalRegister(VirtReg))
continue;
if (!VRM->hasPhys(VirtReg))
continue;
unsigned PhysReg = VRM->getPhys(VirtReg);
Register PhysReg = VRM->getPhys(VirtReg);
const unsigned SubReg = MO.getSubReg();
if (SubReg != 0) {
PhysReg = TRI->getSubReg(PhysReg, SubReg);
@@ -149,7 +149,7 @@ void SIPreAllocateWWMRegs::rewriteRegs(MachineFunction &MF) {
for (unsigned Reg : RegsToRewrite) {
LIS->removeInterval(Reg);
const unsigned PhysReg = VRM->getPhys(Reg);
const Register PhysReg = VRM->getPhys(Reg);
assert(PhysReg != 0);
MFI->ReserveWWMRegister(PhysReg);
}

llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp

@@ -390,9 +390,9 @@ void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
}
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
unsigned FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
.addImm(Offset);
@@ -715,8 +715,9 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
}
for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
unsigned SubReg = NumSubRegs == 1 ?
Register(ValueReg) : getSubReg(ValueReg, getSubRegFromChannel(i));
Register SubReg = NumSubRegs == 1
? Register(ValueReg)
: getSubReg(ValueReg, getSubRegFromChannel(i));
unsigned SOffsetRegState = 0;
unsigned SrcDstRegState = getDefRegState(!IsStore);
@@ -851,8 +852,8 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
// SubReg carries the "Kill" flag when SubReg == SuperReg.
unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
unsigned SubReg = NumSubRegs == 1 ?
SuperReg : getSubReg(SuperReg, SplitParts[i]);
Register SubReg =
NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
if (SpillToSMEM) {
int64_t FrOffset = FrameInfo.getObjectOffset(Index);
@@ -924,7 +925,7 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
// Spill SGPR to a frame index.
// TODO: Should VI try to spill to VGPR and then spill to SMEM?
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
// TODO: Should VI try to spill to VGPR and then spill to SMEM?
MachineInstrBuilder Mov
@@ -1026,8 +1027,8 @@ bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
int64_t FrOffset = FrameInfo.getObjectOffset(Index);
for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
unsigned SubReg = NumSubRegs == 1 ?
SuperReg : getSubReg(SuperReg, SplitParts[i]);
Register SubReg =
NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
if (SpillToSMEM) {
// FIXME: Size may be > 4 but extra bytes wasted.
@@ -1079,7 +1080,7 @@ bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
// Restore SGPR from a stack slot.
// FIXME: We should use S_LOAD_DWORD here for VI.
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned Align = FrameInfo.getObjectAlignment(Index);
MachinePointerInfo PtrInfo
@@ -1263,8 +1264,8 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
// In an entry function/kernel the offset is already the absolute
// address relative to the frame register.
unsigned DiffReg
= MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register DiffReg =
MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
Register ResultReg = IsCopy ?
@@ -1282,8 +1283,8 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
.addImm(Log2_32(ST.getWavefrontSize()))
.addReg(DiffReg);
} else {
unsigned ScaledReg
= MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register ScaledReg =
MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ScaledReg)
.addImm(Log2_32(ST.getWavefrontSize()))
@@ -1296,8 +1297,8 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
.addReg(ScaledReg, RegState::Kill)
.addImm(0); // clamp bit
} else {
unsigned ConstOffsetReg
= MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register ConstOffsetReg =
MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
.addImm(Offset);
@@ -1345,7 +1346,7 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
int64_t Offset = FrameInfo.getObjectOffset(Index);
FIOp.ChangeToImmediate(Offset);
if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
.addImm(Offset);
FIOp.ChangeToRegister(TmpReg, false, false, true);

llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp

@@ -77,7 +77,7 @@ static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
// Try to fold Src0
MachineOperand &Src0 = MI.getOperand(Src0Idx);
if (Src0.isReg()) {
unsigned Reg = Src0.getReg();
Register Reg = Src0.getReg();
if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
if (Def && Def->isMoveImmediate()) {
@@ -457,13 +457,13 @@ static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
MovT.getOpcode() == AMDGPU::COPY);
unsigned T = MovT.getOperand(0).getReg();
Register T = MovT.getOperand(0).getReg();
unsigned Tsub = MovT.getOperand(0).getSubReg();
MachineOperand &Xop = MovT.getOperand(1);
if (!Xop.isReg())
return nullptr;
unsigned X = Xop.getReg();
Register X = Xop.getReg();
unsigned Xsub = Xop.getSubReg();
unsigned Size = TII->getOpSize(MovT, 0) / 4;
@@ -482,7 +482,7 @@ static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
MovY.getOperand(1).getSubReg() != Tsub)
continue;
unsigned Y = MovY.getOperand(0).getReg();
Register Y = MovY.getOperand(0).getReg();
unsigned Ysub = MovY.getOperand(0).getSubReg();
if (!TRI.isVGPR(MRI, Y) || MovT.getParent() != MovY.getParent())
@@ -717,7 +717,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
if (TII->isVOPC(Op32)) {
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
if (Register::isVirtualRegister(DstReg)) {
// VOPC instructions can only write to the VCC register. We can't
// force them to use VCC here, because this is only one register and
@@ -741,7 +741,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
TII->getNamedOperand(MI, AMDGPU::OpName::src2);
if (!Src2->isReg())
continue;
unsigned SReg = Src2->getReg();
Register SReg = Src2->getReg();
if (Register::isVirtualRegister(SReg)) {
MRI.setRegAllocationHint(SReg, 0, VCCReg);
continue;

llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp

@@ -273,7 +273,7 @@ void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
if (!Use.isReg() || !Use.isUse())
continue;
unsigned Reg = Use.getReg();
Register Reg = Use.getReg();
// Handle physical registers that we need to track; this is mostly relevant
// for VCC, which can appear as the (implicit) input of a uniform branch,
@@ -361,7 +361,7 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
if (Inactive.isUndef()) {
LowerToCopyInstrs.push_back(&MI);
} else {
unsigned Reg = Inactive.getReg();
Register Reg = Inactive.getReg();
if (Register::isVirtualRegister(Reg)) {
for (MachineInstr &DefMI : MRI->def_instructions(Reg))
markInstruction(DefMI, StateWWM, Worklist);
@@ -390,7 +390,7 @@ char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
Register Reg = MO.getReg();
if (!Register::isVirtualRegister(Reg) &&
TRI->hasVectorRegisters(TRI->getPhysRegClass(Reg))) {
@@ -556,7 +556,7 @@ bool SIWholeQuadMode::requiresCorrectState(const MachineInstr &MI) const {
MachineBasicBlock::iterator
SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB,
MachineBasicBlock::iterator Before) {
unsigned SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
Register SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
MachineInstr *Save =
BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
@@ -840,7 +840,7 @@ void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
void SIWholeQuadMode::lowerLiveMaskQueries(unsigned LiveMaskReg) {
for (MachineInstr *MI : LiveMaskQueries) {
const DebugLoc &DL = MI->getDebugLoc();
unsigned Dest = MI->getOperand(0).getReg();
Register Dest = MI->getOperand(0).getReg();
MachineInstr *Copy =
BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
.addReg(LiveMaskReg);
@@ -855,7 +855,7 @@ void SIWholeQuadMode::lowerCopyInstrs() {
for (unsigned i = MI->getNumExplicitOperands() - 1; i > 1; i--)
MI->RemoveOperand(i);
const unsigned Reg = MI->getOperand(0).getReg();
const Register Reg = MI->getOperand(0).getReg();
if (TRI->isVGPR(*MRI, Reg)) {
const TargetRegisterClass *regClass = Register::isVirtualRegister(Reg)

llvm/lib/Target/ARC/ARCISelLowering.cpp

@@ -716,7 +716,7 @@ SDValue ARCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
assert(cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0 &&
"Only support lowering frame addr of current frame.");
unsigned FrameReg = ARI.getFrameRegister(MF);
Register FrameReg = ARI.getFrameRegister(MF);
return DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
}

Some files were not shown because too many files have changed in this diff Show More