Add a "moveInstr" method to LiveIntervals. This can be used to move instructions
around within a basic block while maintaining live-intervals.

Updated ScheduleTopDownLive in MachineScheduler.cpp to use the moveInstr API
when reordering MIs.

llvm-svn: 149147
This commit is contained in:
Lang Hames 2012-01-27 22:36:19 +00:00
parent 6f108d9229
commit ad33d5ace7
4 changed files with 234 additions and 1 deletion

View File

@ -273,6 +273,11 @@ namespace llvm {
/// register.
void addKillFlags();
/// moveInstr - Move MachineInstr mi to insertPt, updating the live
/// intervals of mi's operands to reflect the new position. The insertion
/// point can be above or below mi, but must be in the same basic block.
void moveInstr(MachineBasicBlock::iterator insertPt, MachineInstr* mi);
private:
/// computeIntervals - Compute live intervals.
void computeIntervals();

View File

@ -797,6 +797,207 @@ void LiveIntervals::addKillFlags() {
}
}
static bool intervalRangesSane(const LiveInterval& li) {
if (li.empty()) {
return true;
}
SlotIndex lastEnd = li.begin()->start;
for (LiveInterval::const_iterator lrItr = li.begin(), lrEnd = li.end();
lrItr != lrEnd; ++lrItr) {
const LiveRange& lr = *lrItr;
if (lastEnd > lr.start || lr.start >= lr.end)
return false;
lastEnd = lr.end;
}
return true;
}
/// handleMoveDefs - Update the live intervals of registers that receive a
/// plain (non-dead, non-early-clobber) definition from the moved
/// instruction: the defining range's start, and its value number's def
/// slot, slide from the old index to the new one.
template <typename DefSetT>
static void handleMoveDefs(LiveIntervals& lis, SlotIndex origIdx,
                           SlotIndex miIdx, const DefSetT& defs) {
  typename DefSetT::const_iterator defI = defs.begin(), defE = defs.end();
  while (defI != defE) {
    LiveInterval& interval = lis.getInterval(*defI);
    LiveRange* defRange =
      interval.getLiveRangeContaining(origIdx.getRegSlot());
    assert(defRange != 0 && "No range for def?");
    // Only the start point moves; the range's end (the kill) is unaffected
    // by relocating the def.
    defRange->start = miIdx.getRegSlot();
    defRange->valno->def = miIdx.getRegSlot();
    assert(intervalRangesSane(interval) && "Broke live interval moving def.");
    ++defI;
  }
}
/// handleMoveDeadDefs - Update the live intervals of registers that the
/// moved instruction defines dead. A dead def occupies the short range
/// [regSlot, deadSlot) at the instruction itself; that whole range is
/// relocated from origIdx to miIdx.
template <typename DeadDefSetT>
static void handleMoveDeadDefs(LiveIntervals& lis, SlotIndex origIdx,
                               SlotIndex miIdx, const DeadDefSetT& deadDefs) {
  for (typename DeadDefSetT::const_iterator deadDefItr = deadDefs.begin(),
                                            deadDefEnd = deadDefs.end();
       deadDefItr != deadDefEnd; ++deadDefItr) {
    unsigned deadDef = *deadDefItr;
    LiveInterval& li = lis.getInterval(deadDef);
    LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot());
    assert(lr != 0 && "No range for dead def?");
    // Sanity: a dead def's range must be exactly [regSlot, deadSlot) at the
    // old position, with its value number defined there.
    assert(lr->start == origIdx.getRegSlot() && "Bad dead range start?");
    assert(lr->end == origIdx.getDeadSlot() && "Bad dead range end?");
    assert(lr->valno->def == origIdx.getRegSlot() && "Bad dead valno def.");
    // Build the relocated range as a copy first. Note that t shares its
    // VNInfo pointer with *lr, so the write through t.valno->def updates
    // the interval's value number in place before the old range is removed.
    LiveRange t(*lr);
    t.start = miIdx.getRegSlot();
    t.valno->def = miIdx.getRegSlot();
    t.end = miIdx.getDeadSlot();
    // Remove-then-add rather than editing in place, since both endpoints
    // change and the range's position in the sorted list may too.
    li.removeRange(*lr);
    li.addRange(t);
    assert(intervalRangesSane(li) && "Broke live interval moving dead def.");
  }
}
/// handleMoveECs - Update the live intervals of registers the moved
/// instruction defines with an early-clobber def. An early-clobber def
/// occupies the range [regSlot(true), regSlot) at the instruction; that
/// range is relocated from origIdx to miIdx.
template <typename ECSetT>
static void handleMoveECs(LiveIntervals& lis, SlotIndex origIdx,
                          SlotIndex miIdx, const ECSetT& ecs) {
  for (typename ECSetT::const_iterator ecItr = ecs.begin(), ecEnd = ecs.end();
       ecItr != ecEnd; ++ecItr) {
    unsigned ec = *ecItr;
    LiveInterval& li = lis.getInterval(ec);
    // getRegSlot(true) is the early-clobber slot of the index.
    LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot(true));
    assert(lr != 0 && "No range for early clobber?");
    // Sanity: an early-clobber def's range must be exactly
    // [ecSlot, regSlot) at the old position.
    assert(lr->start == origIdx.getRegSlot(true) && "Bad EC range start?");
    assert(lr->end == origIdx.getRegSlot() && "Bad EC range end.");
    assert(lr->valno->def == origIdx.getRegSlot(true) && "Bad EC valno def.");
    // Build the relocated range as a copy. As in handleMoveDeadDefs, t
    // shares its VNInfo pointer with *lr, so t.valno->def updates the
    // interval's value number in place.
    LiveRange t(*lr);
    t.start = miIdx.getRegSlot(true);
    t.valno->def = miIdx.getRegSlot(true);
    t.end = miIdx.getRegSlot();
    li.removeRange(*lr);
    li.addRange(t);
    assert(intervalRangesSane(li) && "Broke live interval moving EC.");
  }
}
/// handleMoveUses - Update the live intervals of registers read by the
/// moved instruction. Registers with no interval, and reserved physical
/// registers, are skipped. Only the end point of the range feeding each use
/// may need to change; how it changes depends on the move direction and on
/// whether this use was the kill.
template <typename UseSetT>
static void handleMoveUses(const MachineBasicBlock *mbb,
                           const MachineRegisterInfo& mri,
                           const BitVector& reservedRegs, LiveIntervals &lis,
                           SlotIndex origIdx, SlotIndex miIdx,
                           const UseSetT &uses) {
  bool movingUp = miIdx < origIdx;
  for (typename UseSetT::const_iterator usesItr = uses.begin(),
                                        usesEnd = uses.end();
       usesItr != usesEnd; ++usesItr) {
    unsigned use = *usesItr;
    if (!lis.hasInterval(use))
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(use) && reservedRegs.test(use))
      continue;
    LiveInterval& li = lis.getInterval(use);
    // The range supplying the value read at the original position.
    LiveRange* lr = li.getLiveRangeBefore(origIdx.getRegSlot());
    assert(lr != 0 && "No range for use?");
    // liveThrough: the value stays live past the original use slot, i.e.
    // this instruction was not the kill.
    bool liveThrough = lr->end > origIdx.getRegSlot();
    if (movingUp) {
      // If moving up and liveThrough - nothing to do.
      // If not live through we need to extend the range to the last use
      // between the old location and the new one.
      if (!liveThrough) {
        // The moved instruction was the kill, so the new end is the latest
        // remaining use slot in [new position, old position) - defaulting
        // to the new position itself if no other use remains in that span.
        SlotIndex lastUseInRange = miIdx.getRegSlot();
        // Scan every use of the register; keep the greatest use slot that
        // falls inside the span being vacated.
        for (MachineRegisterInfo::use_iterator useI = mri.use_begin(use),
                                               useE = mri.use_end();
             useI != useE; ++useI) {
          const MachineInstr* mopI = &*useI;
          const MachineOperand& mop = useI.getOperand();
          SlotIndex instSlot = lis.getSlotIndexes()->getInstructionIndex(mopI);
          SlotIndex opSlot = instSlot.getRegSlot(mop.isEarlyClobber());
          if (opSlot >= lastUseInRange && opSlot < origIdx) {
            lastUseInRange = opSlot;
          }
        }
        lr->end = lastUseInRange;
      }
    } else {
      // Moving down is easy - the existing live range end tells us where
      // the last kill is.
      if (!liveThrough) {
        // Easy fix - just update the range endpoint.
        lr->end = miIdx.getRegSlot();
      } else {
        // Live through: only extend if the value is not live-out of the
        // block and the new position is past the current end; a live-out
        // range already covers the new use slot.
        bool liveOut = lr->end >= lis.getSlotIndexes()->getMBBEndIdx(mbb);
        if (!liveOut && miIdx.getRegSlot() > lr->end) {
          lr->end = miIdx.getRegSlot();
        }
      }
    }
    assert(intervalRangesSane(li) && "Broke live interval moving use.");
  }
}
/// moveInstr - Move MachineInstr mi to insertPt, updating SlotIndexes and
/// the live intervals of all of mi's register operands to reflect the new
/// position. The insertion point may be above or below mi, but must be in
/// the same basic block; moves across block boundaries are not supported,
/// nor are bundled instructions.
void LiveIntervals::moveInstr(MachineBasicBlock::iterator insertPt,
                              MachineInstr *mi) {
  MachineBasicBlock* mbb = mi->getParent();
  // Parenthesize the disjunction explicitly: without the parens the message
  // literal binds only to the second comparison ('a || (b && "msg")'),
  // which gcc/clang diagnose with -Wparentheses.
  assert((insertPt == mbb->end() || insertPt->getParent() == mbb) &&
         "Cannot handle moves across basic block boundaries.");
  assert(&*insertPt != mi && "No-op move requested?");
  assert(!mi->isInsideBundle() && "Can't handle bundled instructions yet.");

  // Grab the original instruction index.
  SlotIndex origIdx = indexes_->getInstructionIndex(mi);

  // Move the machine instr and obtain its new index.
  indexes_->removeMachineInstrFromMaps(mi);
  mbb->remove(mi);
  mbb->insert(insertPt, mi);
  SlotIndex miIdx = indexes_->insertMachineInstrInMaps(mi);

  // Pick the direction.
  bool movingUp = miIdx < origIdx;

  // Bucket the register operands by the kind of live-range update each
  // needs: plain uses, plain defs, dead defs, and early-clobber defs.
  DenseSet<unsigned> uses, defs, deadDefs, ecs;
  for (MachineInstr::mop_iterator mopItr = mi->operands_begin(),
                                  mopEnd = mi->operands_end();
       mopItr != mopEnd; ++mopItr) {
    const MachineOperand& mop = *mopItr;

    if (!mop.isReg() || mop.getReg() == 0)
      continue;
    unsigned reg = mop.getReg();

    if (mop.isUse()) {
      assert(mop.readsReg());
    }

    // A register already classified as early-clobber is not also recorded
    // as a use; the symmetric case (use seen first, EC def later) is
    // handled by the uses.erase() below.
    if (mop.readsReg() && !ecs.count(reg)) {
      uses.insert(reg);
    }
    if (mop.isDef()) {
      if (mop.isDead()) {
        assert(!defs.count(reg) && "Can't mix defs with dead-defs.");
        deadDefs.insert(reg);
      } else if (mop.isEarlyClobber()) {
        uses.erase(reg);
        ecs.insert(reg);
      } else {
        assert(!deadDefs.count(reg) && "Can't mix defs with dead-defs.");
        defs.insert(reg);
      }
    }
  }

  BitVector reservedRegs(tri_->getReservedRegs(*mbb->getParent()));

  // NOTE(review): the processing order reverses with the move direction,
  // presumably so each helper's intervalRangesSane assertion holds at every
  // intermediate step - confirm before reordering.
  if (movingUp) {
    handleMoveUses(mbb, *mri_, reservedRegs, *this, origIdx, miIdx, uses);
    handleMoveECs(*this, origIdx, miIdx, ecs);
    handleMoveDeadDefs(*this, origIdx, miIdx, deadDefs);
    handleMoveDefs(*this, origIdx, miIdx, defs);
  } else {
    handleMoveDefs(*this, origIdx, miIdx, defs);
    handleMoveDeadDefs(*this, origIdx, miIdx, deadDefs);
    handleMoveECs(*this, origIdx, miIdx, ecs);
    handleMoveUses(mbb, *mri_, reservedRegs, *this, origIdx, miIdx, uses);
  }
}
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.

View File

@ -43,6 +43,7 @@ public:
const TargetInstrInfo *TII;
const MachineLoopInfo *MLI;
const MachineDominatorTree *MDT;
LiveIntervals *LIS;
MachineScheduler();
@ -236,7 +237,7 @@ void ScheduleTopDownLive::Schedule() {
if (&*InsertPos == MI)
++InsertPos;
else {
BB->splice(InsertPos, BB, MI);
Pass->LIS->moveInstr(InsertPos, MI);
if (Begin == InsertPos)
Begin = MI;
}
@ -253,6 +254,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
MLI = &getAnalysis<MachineLoopInfo>();
MDT = &getAnalysis<MachineDominatorTree>();
LIS = &getAnalysis<LiveIntervals>();
TII = MF->getTarget().getInstrInfo();
// Select the scheduler, or set the default.

View File

@ -841,6 +841,19 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
TII->reMaterialize(*MBB, MII, DstReg, 0, DefMI, *TRI);
MachineInstr *NewMI = prior(MII);
// NewMI may have dead implicit defs (E.g. EFLAGS for MOV<bits>r0 on X86).
// We need to remember these so we can add intervals once we insert
// NewMI into SlotIndexes.
SmallVector<unsigned, 4> NewMIImplDefs;
for (unsigned i = NewMI->getDesc().getNumOperands(),
e = NewMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = NewMI->getOperand(i);
if (MO.isReg()) {
assert(MO.isDef() && MO.isImplicit() && MO.isDead());
NewMIImplDefs.push_back(MO.getReg());
}
}
// CopyMI may have implicit operands, transfer them over to the newly
// rematerialized instruction. And update implicit def interval valnos.
for (unsigned i = CopyMI->getDesc().getNumOperands(),
@ -853,6 +866,18 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
}
LIS->ReplaceMachineInstrInMaps(CopyMI, NewMI);
SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
for (unsigned i = 0, e = NewMIImplDefs.size(); i != e; ++i) {
unsigned reg = NewMIImplDefs[i];
LiveInterval &li = LIS->getInterval(reg);
VNInfo *DeadDefVN = li.getNextValue(NewMIIdx.getRegSlot(), 0,
LIS->getVNInfoAllocator());
LiveRange lr(NewMIIdx.getRegSlot(), NewMIIdx.getDeadSlot(), DeadDefVN);
li.addRange(lr);
}
NewMI->copyImplicitOps(CopyMI);
CopyMI->eraseFromParent();
ReMatCopies.insert(CopyMI);
ReMatDefs.insert(DefMI);