misched preparation: rename core scheduler methods for consistency.

We had half the API with one convention, half with another. Now was a
good time to clean it up.

llvm-svn: 152255
Andrew Trick 2012-03-07 23:00:49 +00:00
parent 22842f89e4
commit 52226d409b
15 changed files with 112 additions and 112 deletions
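The mechanical consequence of the rename is that anything deriving from the
scheduling classes has to switch its overrides to the new lowerCamelCase
spellings. A minimal sketch of an affected subclass follows; the
MyVLIWScheduler name and its trivial bodies are illustrative only (modeled on
the DefaultVLIWScheduler changes below), not part of this commit.

// Hypothetical out-of-tree subclass updated to the renamed ScheduleDAGInstrs
// hooks. Before this commit these overrides were spelled StartBlock(),
// Schedule(), and FinishBlock(), and the graph builder was BuildSchedGraph().
class MyVLIWScheduler : public ScheduleDAGInstrs {
public:
  MyVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
                  MachineDominatorTree &MDT, bool IsPostRA)
    : ScheduleDAGInstrs(MF, MLI, MDT, IsPostRA) {}

  // Per-block setup and teardown; call through to the renamed base methods.
  void startBlock(MachineBasicBlock *BB) { ScheduleDAGInstrs::startBlock(BB); }
  void finishBlock() { ScheduleDAGInstrs::finishBlock(); }

  // Per-region work: build the DAG, then emit nodes in the chosen order.
  void schedule() {
    buildSchedGraph(0);
    // ... order the SUnits and fill in the schedule ...
  }
};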

@ -85,11 +85,11 @@ namespace llvm {
virtual void dump(ScheduleDAG* DAG) const;
// ScheduledNode - As nodes are scheduled, we look to see if there are any
// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void ScheduledNode(SUnit *Node);
void scheduledNode(SUnit *Node);
private:
void AdjustPriorityOfUnscheduledPreds(SUnit *SU);

@ -76,7 +76,7 @@ namespace llvm {
public:
ResourcePriorityQueue(SelectionDAGISel *IS);
~ResourcePriorityQueue() {
delete ResourcesModel;
}
@ -126,8 +126,8 @@ namespace llvm {
virtual void dump(ScheduleDAG* DAG) const;
/// ScheduledNode - Main resource tracking point.
void ScheduledNode(SUnit *Node);
/// scheduledNode - Main resource tracking point.
void scheduledNode(SUnit *Node);
bool isResourceAvailable(SUnit *SU);
void reserveResources(SUnit *SU);

@ -467,13 +467,13 @@ namespace llvm {
virtual void dump(ScheduleDAG *) const {}
/// ScheduledNode - As each node is scheduled, this method is invoked. This
/// scheduledNode - As each node is scheduled, this method is invoked. This
/// allows the priority function to adjust the priority of related
/// unscheduled nodes, for example.
///
virtual void ScheduledNode(SUnit *) {}
virtual void scheduledNode(SUnit *) {}
virtual void UnscheduledNode(SUnit *) {}
virtual void unscheduledNode(SUnit *) {}
void setCurCycle(unsigned Cycle) {
CurCycle = Cycle;
@ -543,18 +543,18 @@ namespace llvm {
protected:
/// ComputeLatency - Compute node latency.
///
virtual void ComputeLatency(SUnit *SU) = 0;
virtual void computeLatency(SUnit *SU) = 0;
/// ComputeOperandLatency - Override dependence edge latency using
/// operand use/def information
///
virtual void ComputeOperandLatency(SUnit *, SUnit *,
virtual void computeOperandLatency(SUnit *, SUnit *,
SDep&) const { }
/// ForceUnitLatencies - Return true if all scheduling edges should be given
/// a latency value of one. The default is to return false; schedulers may
/// override this as needed.
virtual bool ForceUnitLatencies() const { return false; }
virtual bool forceUnitLatencies() const { return false; }
private:
// Return the MCInstrDesc of this SDNode or NULL.
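
The scheduledNode()/unscheduledNode() pair above exists because backtracking
schedulers can undo a decision, and the priority queue has to be told about
both directions. A condensed sketch of how a list scheduler drives the renamed
hooks (simplified from ScheduleDAGRRList further down in this commit; the
helper names are illustrative, not real LLVM API):

// Sketch only: notify the priority queue through the renamed hooks.
static void noteScheduled(SchedulingPriorityQueue *AvailableQueue, SUnit *SU) {
  SU->isScheduled = true;
  AvailableQueue->scheduledNode(SU);    // was ScheduledNode()
}

static void noteUnscheduled(SchedulingPriorityQueue *AvailableQueue, SUnit *SU) {
  SU->isScheduled = false;
  AvailableQueue->push(SU);             // the node becomes available again
  AvailableQueue->unscheduledNode(SU);  // was UnscheduledNode()
}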

@ -111,7 +111,7 @@ public:
DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
MachineDominatorTree &MDT, bool IsPostRA);
// Schedule - Actual scheduling work.
void Schedule();
void schedule();
};
} // end anonymous namespace
@ -121,9 +121,9 @@ DefaultVLIWScheduler::DefaultVLIWScheduler(
ScheduleDAGInstrs(MF, MLI, MDT, IsPostRA) {
}
void DefaultVLIWScheduler::Schedule() {
void DefaultVLIWScheduler::schedule() {
// Build the scheduling graph.
BuildSchedGraph(0);
buildSchedGraph(0);
}
// VLIWPacketizerList Ctor
@ -186,7 +186,7 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
MachineBasicBlock::iterator EndItr) {
DefaultVLIWScheduler *Scheduler = (DefaultVLIWScheduler *)SchedulerImpl;
Scheduler->enterRegion(MBB, BeginItr, EndItr, MBB->size());
Scheduler->Schedule();
Scheduler->schedule();
Scheduler->exitRegion();
// Remember scheduling units.

@ -84,11 +84,11 @@ void LatencyPriorityQueue::push(SUnit *SU) {
}
// ScheduledNode - As nodes are scheduled, we look to see if there are any
// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void LatencyPriorityQueue::ScheduledNode(SUnit *SU) {
void LatencyPriorityQueue::scheduledNode(SUnit *SU) {
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
AdjustPriorityOfUnscheduledPreds(I->getSUnit());

@ -160,7 +160,7 @@ public:
Pass(P) {}
/// ScheduleDAGInstrs callback.
void Schedule();
void schedule();
/// Interface implemented by the selected top-down liveinterval scheduler.
///
@ -203,10 +203,10 @@ void ScheduleTopDownLive::releaseSuccessors(SUnit *SU) {
}
}
/// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// time to do some work.
void ScheduleTopDownLive::Schedule() {
BuildSchedGraph(&Pass->getAnalysis<AliasAnalysis>());
void ScheduleTopDownLive::schedule() {
buildSchedGraph(&Pass->getAnalysis<AliasAnalysis>());
DEBUG(dbgs() << "********** MI Scheduling **********\n");
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
@ -273,7 +273,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
unsigned RemainingCount = MBB->size();
for(MachineBasicBlock::iterator RegionEnd = MBB->end();
RegionEnd != MBB->begin();) {
Scheduler->StartBlock(MBB);
Scheduler->startBlock(MBB);
// The next region starts above the previous region. Look backward in the
// instruction stream until we find the nearest boundary.
MachineBasicBlock::iterator I = RegionEnd;
@ -301,8 +301,8 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
dbgs() << " Remaining: " << RemainingCount << "\n");
// Inform ScheduleDAGInstrs of the region being scheduled. It calls back
// to our Schedule() method.
Scheduler->Schedule();
// to our schedule() method.
Scheduler->schedule();
Scheduler->exitRegion();
// Scheduling has invalidated the current iterator 'I'. Ask the
@ -310,7 +310,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
RegionEnd = Scheduler->begin();
}
assert(RemainingCount == 0 && "Instruction count mismatch!");
Scheduler->FinishBlock();
Scheduler->finishBlock();
}
return true;
}
@ -331,9 +331,9 @@ public:
ScheduleDAGInstrs(*P->MF, *P->MLI, *P->MDT, /*IsPostRA=*/false, P->LIS),
Pass(P) {}
/// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// time to do some work.
void Schedule();
void schedule();
};
} // namespace
@ -348,8 +348,8 @@ SchedDefaultRegistry("default", "Activate the scheduler pass, "
/// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// time to do some work.
void DefaultMachineScheduler::Schedule() {
BuildSchedGraph(&Pass->getAnalysis<AliasAnalysis>());
void DefaultMachineScheduler::schedule() {
buildSchedGraph(&Pass->getAnalysis<AliasAnalysis>());
DEBUG(dbgs() << "********** MI Scheduling **********\n");
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
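
Condensing the loop above: for each block the pass now drives the renamed
callbacks in a fixed order. A sketch of that sequence for a single region
covering the whole block (illustrative; the scheduleBlock helper is not part
of this commit, and real code splits the block at scheduling boundaries as
shown above):

// Sketch of the per-block calling protocol after the rename.
static void scheduleBlock(ScheduleDAGInstrs *Scheduler, MachineBasicBlock *MBB) {
  Scheduler->startBlock(MBB);                    // was StartBlock()
  Scheduler->enterRegion(MBB, MBB->begin(), MBB->end(), MBB->size());
  Scheduler->schedule();                         // was Schedule(); implementations
                                                 // build the DAG via buildSchedGraph()
  Scheduler->exitRegion();
  Scheduler->finishBlock();                      // was FinishBlock()
}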

@ -139,10 +139,10 @@ namespace {
~SchedulePostRATDList();
/// StartBlock - Initialize register live-range state for scheduling in
/// startBlock - Initialize register live-range state for scheduling in
/// this block.
///
void StartBlock(MachineBasicBlock *BB);
void startBlock(MachineBasicBlock *BB);
/// Initialize the scheduler state for the next scheduling region.
virtual void enterRegion(MachineBasicBlock *bb,
@ -155,7 +155,7 @@ namespace {
/// Schedule - Schedule the instruction range using list scheduling.
///
void Schedule();
void schedule();
void EmitSchedule();
@ -164,9 +164,9 @@ namespace {
///
void Observe(MachineInstr *MI, unsigned Count);
/// FinishBlock - Clean up register live-range state.
/// finishBlock - Clean up register live-range state.
///
void FinishBlock();
void finishBlock();
/// FixupKills - Fix register kill flags that have been made
/// invalid due to scheduling
@ -301,7 +301,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
#endif
// Initialize register live-range state for scheduling in this block.
Scheduler.StartBlock(MBB);
Scheduler.startBlock(MBB);
// Schedule each sequence of instructions not interrupted by a label
// or anything else that effectively needs to shut down scheduling.
@ -314,7 +314,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
// don't need to worry about register pressure.
if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
Scheduler.enterRegion(MBB, I, Current, CurrentCount);
Scheduler.Schedule();
Scheduler.schedule();
Scheduler.exitRegion();
Scheduler.EmitSchedule();
Current = MI;
@ -330,12 +330,12 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
assert((MBB->begin() == Current || CurrentCount != 0) &&
"Instruction count mismatch!");
Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
Scheduler.Schedule();
Scheduler.schedule();
Scheduler.exitRegion();
Scheduler.EmitSchedule();
// Clean up register live-range state.
Scheduler.FinishBlock();
Scheduler.finishBlock();
// Update register kills
Scheduler.FixupKills(MBB);
@ -347,9 +347,9 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
// Call the superclass.
ScheduleDAGInstrs::StartBlock(BB);
ScheduleDAGInstrs::startBlock(BB);
// Reset the hazard recognizer and anti-dep breaker.
HazardRec->Reset();
@ -359,9 +359,9 @@ void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
void SchedulePostRATDList::schedule() {
// Build the scheduling graph.
BuildSchedGraph(AA);
buildSchedGraph(AA);
if (AntiDepBreak != NULL) {
unsigned Broken =
@ -376,7 +376,7 @@ void SchedulePostRATDList::Schedule() {
// that register, and add new anti-dependence and output-dependence
// edges based on the next live range of the register.
ScheduleDAG::clearDAG();
BuildSchedGraph(AA);
buildSchedGraph(AA);
NumFixedAnti += Broken;
}
@ -401,12 +401,12 @@ void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
void SchedulePostRATDList::finishBlock() {
if (AntiDepBreak != NULL)
AntiDepBreak->FinishBlock();
// Call the superclass.
ScheduleDAGInstrs::FinishBlock();
ScheduleDAGInstrs::finishBlock();
}
/// StartBlockForKills - Initialize register live-range state for updating kills
@ -635,7 +635,7 @@ void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
ReleaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue.ScheduledNode(SU);
AvailableQueue.scheduledNode(SU);
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down

@ -125,14 +125,14 @@ static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
return 0;
}
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
void ScheduleDAGInstrs::startBlock(MachineBasicBlock *BB) {
LoopRegs.Deps.clear();
if (MachineLoop *ML = MLI.getLoopFor(BB))
if (BB == ML->getLoopLatch())
LoopRegs.VisitLoop(ML);
}
void ScheduleDAGInstrs::FinishBlock() {
void ScheduleDAGInstrs::finishBlock() {
// Nothing to do.
}
@ -164,7 +164,7 @@ void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
InsertPosIndex = endcount;
// Check to see if the scheduler cares about latencies.
UnitLatencies = ForceUnitLatencies();
UnitLatencies = forceUnitLatencies();
ScheduleDAG::clearDAG();
}
@ -175,7 +175,7 @@ void ScheduleDAGInstrs::exitRegion() {
// Nothing to do.
}
/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
@ -183,7 +183,7 @@ void ScheduleDAGInstrs::exitRegion() {
/// especially important when the definition latency of the return value(s)
/// are too high to be hidden by the branch or when the liveout registers
/// used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
void ScheduleDAGInstrs::addSchedBarrierDeps() {
MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
ExitSU.setInstr(ExitMI);
bool AllDepKnown = ExitMI &&
@ -259,7 +259,7 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
// perform its own adjustments.
const SDep& dep = SDep(SU, SDep::Data, LDataLatency, *Alias);
if (!UnitLatencies) {
ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
computeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
}
UseSU->addPred(dep);
@ -449,7 +449,7 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
if (!UnitLatencies) {
// Adjust the dependence latency using operand def/use information, then
// allow the target to perform its own adjustments.
ComputeOperandLatency(DefSU, SU, const_cast<SDep &>(dep));
computeOperandLatency(DefSU, SU, const_cast<SDep &>(dep));
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
}
@ -481,7 +481,7 @@ void ScheduleDAGInstrs::initSUnits() {
if (MI->isDebugValue())
continue;
SUnit *SU = NewSUnit(MI);
SUnit *SU = newSUnit(MI);
MISUnitMap[MI] = SU;
SU->isCall = MI->isCall();
@ -491,11 +491,11 @@ void ScheduleDAGInstrs::initSUnits() {
if (UnitLatencies)
SU->Latency = 1;
else
ComputeLatency(SU);
computeLatency(SU);
}
}
void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
// Create an SUnit for each real instruction.
initSUnits();
@ -530,7 +530,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// Model data dependencies between instructions being scheduled and the
// ExitSU.
AddSchedBarrierDeps();
addSchedBarrierDeps();
// Walk the list of instructions, from bottom moving up.
MachineInstr *PrevMI = NULL;
@ -728,7 +728,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
MISUnitMap.clear();
}
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
void ScheduleDAGInstrs::computeLatency(SUnit *SU) {
// Compute the latency for the node.
if (!InstrItins || InstrItins->isEmpty()) {
SU->Latency = 1;
@ -742,7 +742,7 @@ void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
}
}
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
void ScheduleDAGInstrs::computeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const {
if (!InstrItins || InstrItins->isEmpty())
return;

@ -243,7 +243,7 @@ namespace llvm {
/// NewSUnit - Creates a new SUnit and return a ptr to it.
///
SUnit *NewSUnit(MachineInstr *MI) {
SUnit *newSUnit(MachineInstr *MI) {
#ifndef NDEBUG
const SUnit *Addr = SUnits.empty() ? 0 : &SUnits[0];
#endif
@ -254,13 +254,13 @@ namespace llvm {
return &SUnits.back();
}
/// StartBlock - Prepare to perform scheduling in the given block.
/// startBlock - Prepare to perform scheduling in the given block.
///
virtual void StartBlock(MachineBasicBlock *BB);
virtual void startBlock(MachineBasicBlock *BB);
/// FinishBlock - Clean up after scheduling in the given block.
/// finishBlock - Clean up after scheduling in the given block.
///
virtual void FinishBlock();
virtual void finishBlock();
/// Initialize the scheduler state for the next scheduling region.
virtual void enterRegion(MachineBasicBlock *bb,
@ -271,35 +271,35 @@ namespace llvm {
/// Notify that the scheduler has finished scheduling the current region.
virtual void exitRegion();
/// BuildSchedGraph - Build SUnits from the MachineBasicBlock that we are
/// buildSchedGraph - Build SUnits from the MachineBasicBlock that we are
/// input.
void BuildSchedGraph(AliasAnalysis *AA);
void buildSchedGraph(AliasAnalysis *AA);
/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to scheduling barrier. We want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// are too high to be hidden by the branch or when the liveout registers
/// used by instructions in the fallthrough block.
void AddSchedBarrierDeps();
void addSchedBarrierDeps();
/// ComputeLatency - Compute node latency.
/// computeLatency - Compute node latency.
///
virtual void ComputeLatency(SUnit *SU);
virtual void computeLatency(SUnit *SU);
/// ComputeOperandLatency - Override dependence edge latency using
/// computeOperandLatency - Override dependence edge latency using
/// operand use/def information
///
virtual void ComputeOperandLatency(SUnit *Def, SUnit *Use,
virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const;
/// Schedule - Order nodes according to selected style, filling
/// schedule - Order nodes according to selected style, filling
/// in the Sequence member.
///
/// Typically, a scheduling algorithm will implement Schedule() without
/// Typically, a scheduling algorithm will implement schedule() without
/// overriding enterRegion() or exitRegion().
virtual void Schedule() = 0;
virtual void schedule() = 0;
virtual void dumpNode(const SUnit *SU) const;

@ -470,7 +470,7 @@ signed ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
/// Main resource tracking point.
void ResourcePriorityQueue::ScheduledNode(SUnit *SU) {
void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
// Use NULL entry as an event marker to reset
// the DFA state.
if (!SU) {

@ -101,8 +101,8 @@ private:
bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
void ListScheduleBottomUp();
/// ForceUnitLatencies - The fast scheduler doesn't care about real latencies.
bool ForceUnitLatencies() const { return true; }
/// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
bool forceUnitLatencies() const { return true; }
};
} // end anonymous namespace
@ -245,7 +245,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
SDValue(LoadNode, 1));
SUnit *NewSU = NewSUnit(N);
SUnit *NewSU = newSUnit(N);
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NewSU->NodeNum);
@ -268,7 +268,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
LoadSU = &SUnits[LoadNode->getNodeId()];
isNewLoad = false;
} else {
LoadSU = NewSUnit(LoadNode);
LoadSU = newSUnit(LoadNode);
LoadNode->setNodeId(LoadSU->NodeNum);
}
@ -381,11 +381,11 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
SmallVector<SUnit*, 2> &Copies) {
SUnit *CopyFromSU = NewSUnit(static_cast<SDNode *>(NULL));
SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(NULL));
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
SUnit *CopyToSU = NewSUnit(static_cast<SDNode *>(NULL));
SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(NULL));
CopyToSU->CopySrcRC = DestRC;
CopyToSU->CopyDstRC = SrcRC;

@ -232,7 +232,7 @@ private:
/// Updates the topological ordering if required.
SUnit *CreateNewSUnit(SDNode *N) {
unsigned NumSUnits = SUnits.size();
SUnit *NewNode = NewSUnit(N);
SUnit *NewNode = newSUnit(N);
// Update the topological ordering.
if (NewNode->NodeNum >= NumSUnits)
Topo.InitDAGTopologicalSorting();
@ -250,9 +250,9 @@ private:
return NewNode;
}
/// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
/// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
/// need actual latency information but the hybrid scheduler does.
bool ForceUnitLatencies() const {
bool forceUnitLatencies() const {
return !NeedLatency;
}
};
@ -354,7 +354,7 @@ void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
#endif
--PredSU->NumSuccsLeft;
if (!ForceUnitLatencies()) {
if (!forceUnitLatencies()) {
// Updating predecessor's height. This is now the cycle when the
// predecessor can be scheduled without causing a pipeline stall.
PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
@ -701,7 +701,7 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
Sequence.push_back(SU);
AvailableQueue->ScheduledNode(SU);
AvailableQueue->scheduledNode(SU);
// If HazardRec is disabled, and each inst counts as one cycle, then
// advance CurCycle before ReleasePredecessors to avoid useless pushes to
@ -848,7 +848,7 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
else {
AvailableQueue->push(SU);
}
AvailableQueue->UnscheduledNode(SU);
AvailableQueue->unscheduledNode(SU);
}
/// After backtracking, the hazard checker needs to be restored to a state
@ -969,7 +969,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
LoadNode->setNodeId(LoadSU->NodeNum);
InitNumRegDefsLeft(LoadSU);
ComputeLatency(LoadSU);
computeLatency(LoadSU);
}
SUnit *NewSU = CreateNewSUnit(N);
@ -987,7 +987,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
NewSU->isCommutable = true;
InitNumRegDefsLeft(NewSU);
ComputeLatency(NewSU);
computeLatency(NewSU);
// Record all the edges to and from the old SU, by category.
SmallVector<SDep, 4> ChainPreds;
@ -1687,9 +1687,9 @@ public:
int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
void ScheduledNode(SUnit *SU);
void scheduledNode(SUnit *SU);
void UnscheduledNode(SUnit *SU);
void unscheduledNode(SUnit *SU);
protected:
bool canClobber(const SUnit *SU, const SUnit *Op);
@ -1990,7 +1990,7 @@ int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
return PDiff;
}
void RegReductionPQBase::ScheduledNode(SUnit *SU) {
void RegReductionPQBase::scheduledNode(SUnit *SU) {
if (!TracksRegPressure)
return;
@ -2059,7 +2059,7 @@ void RegReductionPQBase::ScheduledNode(SUnit *SU) {
dumpRegPressure();
}
void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
void RegReductionPQBase::unscheduledNode(SUnit *SU) {
if (!TracksRegPressure)
return;

@ -65,7 +65,7 @@ void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb) {
/// NewSUnit - Creates a new SUnit and return a ptr to it.
///
SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
SUnit *ScheduleDAGSDNodes::newSUnit(SDNode *N) {
#ifndef NDEBUG
const SUnit *Addr = 0;
if (!SUnits.empty())
@ -87,7 +87,7 @@ SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
}
SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
SUnit *SU = NewSUnit(Old->getNode());
SUnit *SU = newSUnit(Old->getNode());
SU->OrigNode = Old->OrigNode;
SU->Latency = Old->Latency;
SU->isVRegCycle = Old->isVRegCycle;
@ -310,7 +310,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
// If this node has already been processed, stop now.
if (NI->getNodeId() != -1) continue;
SUnit *NodeSUnit = NewSUnit(NI);
SUnit *NodeSUnit = newSUnit(NI);
// See if anything is glued to this node, if so, add them to glued
// nodes. Nodes can have at most one glue input and one glue output. Glue
@ -368,7 +368,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
InitNumRegDefsLeft(NodeSUnit);
// Assign the Latency field of NodeSUnit using target-provided information.
ComputeLatency(NodeSUnit);
computeLatency(NodeSUnit);
}
// Find all call operands.
@ -390,7 +390,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
// Check to see if the scheduler cares about latencies.
bool UnitLatencies = ForceUnitLatencies();
bool UnitLatencies = forceUnitLatencies();
// Pass 2: add the preds, succs, etc.
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
@ -456,7 +456,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
OpLatency, PhysReg);
if (!isChain && !UnitLatencies) {
ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
computeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
}
@ -549,7 +549,7 @@ void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
}
}
void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
void ScheduleDAGSDNodes::computeLatency(SUnit *SU) {
SDNode *N = SU->getNode();
// TokenFactor operands are considered zero latency, and some schedulers
@ -561,7 +561,7 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
}
// Check to see if the scheduler cares about latencies.
if (ForceUnitLatencies()) {
if (forceUnitLatencies()) {
SU->Latency = 1;
return;
}
@ -583,10 +583,10 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
SU->Latency += TII->getInstrLatency(InstrItins, N);
}
void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const{
// Check to see if the scheduler cares about latencies.
if (ForceUnitLatencies())
if (forceUnitLatencies())
return;
if (dep.getKind() != SDep::Data)

@ -71,7 +71,7 @@ namespace llvm {
/// NewSUnit - Creates a new SUnit and return a ptr to it.
///
SUnit *NewSUnit(SDNode *N);
SUnit *newSUnit(SDNode *N);
/// Clone - Creates a clone of the specified SUnit. It does not copy the
/// predecessors / successors info nor the temporary scheduling states.
@ -94,17 +94,17 @@ namespace llvm {
///
void InitNumRegDefsLeft(SUnit *SU);
/// ComputeLatency - Compute node latency.
/// computeLatency - Compute node latency.
///
virtual void ComputeLatency(SUnit *SU);
virtual void computeLatency(SUnit *SU);
/// ComputeOperandLatency - Override dependence edge latency using
/// computeOperandLatency - Override dependence edge latency using
/// operand use/def information
///
virtual void ComputeOperandLatency(SUnit *Def, SUnit *Use,
virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const { }
virtual void ComputeOperandLatency(SDNode *Def, SDNode *Use,
virtual void computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const;
/// Schedule - Order nodes according to selected style, filling

@ -158,7 +158,7 @@ void ScheduleDAGVLIW::scheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
releaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue->ScheduledNode(SU);
AvailableQueue->scheduledNode(SU);
}
/// listScheduleTopDown - The main loop of list scheduling for top-down
@ -202,7 +202,7 @@ void ScheduleDAGVLIW::listScheduleTopDown() {
// don't advance the hazard recognizer.
if (AvailableQueue->empty()) {
// Reset DFA state.
AvailableQueue->ScheduledNode(0);
AvailableQueue->scheduledNode(0);
++CurCycle;
continue;
}