[MCA] Moved six instruction flags from InstrDesc to InstructionBase.

Differential Revision: https://reviews.llvm.org/D121508
Patrick Holland 2022-03-11 19:52:36 -08:00
parent 0c4bbd293e
commit 55cedf9cc5
9 changed files with 87 additions and 57 deletions
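
What the move enables (an illustrative sketch, not part of this patch): because the six flags now live on each Instruction rather than on the shared InstrDesc, a target's InstrPostProcess can adjust them per instruction through the new setters. The class name and the predicate below are hypothetical placeholders; the InstrPostProcess interface and the setters are the ones shown in the diff.

#include "llvm/MC/MCInst.h"
#include "llvm/MCA/CustomBehaviour.h"
#include "llvm/MCA/Instruction.h"

namespace llvm {
namespace mca {

// Hypothetical target hook: model selected load/store instructions as pure
// loads by clearing MayStore on just those Instruction objects.
class ExampleInstrPostProcess : public InstrPostProcess {
public:
  using InstrPostProcess::InstrPostProcess;

  void postProcessInstruction(std::unique_ptr<Instruction> &Inst,
                              const MCInst &MCI) override {
    // Stand-in for a real target-specific opcode/operand check.
    if (Inst->getMayLoad() && Inst->getMayStore())
      Inst->setMayStore(false);
  }
};

} // namespace mca
} // namespace llvm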


@@ -49,6 +49,11 @@ public:
/// scheduling model.
virtual void postProcessInstruction(std::unique_ptr<Instruction> &Inst,
const MCInst &MCI) {}
// The resetState() method gets invoked at the beginning of each code region
// so that targets that override this function can clear any state that they
// have left from the previous code region.
virtual void resetState() {}
};
/// Class which can be overridden by targets to enforce instruction
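
To illustrate the resetState() hook added above, here is a minimal sketch (class name and counter are hypothetical, reusing the headers and namespaces from the previous example): per-region bookkeeping is cleared when the driver calls resetState() at the start of each code region.

class RegionCountingPostProcess : public InstrPostProcess {
  // Hypothetical per-region counter; must not leak into the next region.
  unsigned NumFlagsChanged = 0;

public:
  using InstrPostProcess::InstrPostProcess;

  void postProcessInstruction(std::unique_ptr<Instruction> &Inst,
                              const MCInst &MCI) override {
    if (Inst->getRetireOOO()) {
      Inst->setRetireOOO(false);
      ++NumFlagsChanged;
    }
  }

  // Called by llvm-mca at the beginning of every code region (see the
  // llvm-mca.cpp hunks below); discards state from the previous region.
  void resetState() override { NumFlagsChanged = 0; }
};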


@@ -472,13 +472,6 @@ struct InstrDesc {
// subtarget when computing the reciprocal throughput.
unsigned SchedClassID;
unsigned MayLoad : 1;
unsigned MayStore : 1;
unsigned HasSideEffects : 1;
unsigned BeginGroup : 1;
unsigned EndGroup : 1;
unsigned RetireOOO : 1;
// True if all buffered resources are in-order, and there is at least one
// buffer which is a dispatch hazard (BufferSize = 0).
unsigned MustIssueImmediately : 1;
@@ -518,8 +511,16 @@ class InstructionBase {
unsigned Opcode;
// Flags used by the LSUnit.
bool IsALoadBarrier;
bool IsAStoreBarrier;
bool IsALoadBarrier : 1;
bool IsAStoreBarrier : 1;
// Flags copied from the InstrDesc and potentially modified by
// CustomBehaviour or (more likely) InstrPostProcess.
bool MayLoad : 1;
bool MayStore : 1;
bool HasSideEffects : 1;
bool BeginGroup : 1;
bool EndGroup : 1;
bool RetireOOO : 1;
public:
InstructionBase(const InstrDesc &D, const unsigned Opcode)
@@ -568,7 +569,22 @@ public:
// Returns true if this instruction is a candidate for move elimination.
bool isOptimizableMove() const { return IsOptimizableMove; }
void setOptimizableMove() { IsOptimizableMove = true; }
bool isMemOp() const { return Desc.MayLoad || Desc.MayStore; }
bool isMemOp() const { return MayLoad || MayStore; }
// Getters and setters for general instruction flags.
void setMayLoad(bool newVal) { MayLoad = newVal; }
void setMayStore(bool newVal) { MayStore = newVal; }
void setHasSideEffects(bool newVal) { HasSideEffects = newVal; }
void setBeginGroup(bool newVal) { BeginGroup = newVal; }
void setEndGroup(bool newVal) { EndGroup = newVal; }
void setRetireOOO(bool newVal) { RetireOOO = newVal; }
bool getMayLoad() const { return MayLoad; }
bool getMayStore() const { return MayStore; }
bool getHasSideEffects() const { return HasSideEffects; }
bool getBeginGroup() const { return BeginGroup; }
bool getEndGroup() const { return EndGroup; }
bool getRetireOOO() const { return RetireOOO; }
};
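
Why the flags had to move (illustration only, not from the patch): an InstrDesc is built once per opcode by InstrBuilder and shared by every Instruction created from it, so a flag stored there could not be changed for a single instruction without affecting all of its siblings. With the per-instruction copies above, an override stays local:

#include "llvm/MCA/Instruction.h"

// Hypothetical helper; assumes an already-lowered mca::Instruction.
static void demoteToLoadOnly(llvm::mca::Instruction &Inst) {
  Inst.setMayStore(false);
  // Only this Instruction is affected: isMemOp() reads the per-instruction
  // copies and now returns Inst.getMayLoad(), while the cached InstrDesc
  // returned by Inst.getDesc() is left untouched.
}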
/// An instruction propagated through the simulated instruction pipeline.


@@ -67,17 +67,17 @@ void LSUnitBase::dump() const {
#endif
unsigned LSUnit::dispatch(const InstRef &IR) {
const InstrDesc &Desc = IR.getInstruction()->getDesc();
bool IsStoreBarrier = IR.getInstruction()->isAStoreBarrier();
bool IsLoadBarrier = IR.getInstruction()->isALoadBarrier();
assert((Desc.MayLoad || Desc.MayStore) && "Not a memory operation!");
const Instruction &IS = *IR.getInstruction();
bool IsStoreBarrier = IS.isAStoreBarrier();
bool IsLoadBarrier = IS.isALoadBarrier();
assert((IS.getMayLoad() || IS.getMayStore()) && "Not a memory operation!");
if (Desc.MayLoad)
if (IS.getMayLoad())
acquireLQSlot();
if (Desc.MayStore)
if (IS.getMayStore())
acquireSQSlot();
if (Desc.MayStore) {
if (IS.getMayStore()) {
unsigned NewGID = createMemoryGroup();
MemoryGroup &NewGroup = getGroup(NewGID);
NewGroup.addInstruction();
@@ -115,7 +115,7 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
if (IsStoreBarrier)
CurrentStoreBarrierGroupID = NewGID;
if (Desc.MayLoad) {
if (IS.getMayLoad()) {
CurrentLoadGroupID = NewGID;
if (IsLoadBarrier)
CurrentLoadBarrierGroupID = NewGID;
@@ -124,7 +124,7 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
return NewGID;
}
assert(Desc.MayLoad && "Expected a load!");
assert(IS.getMayLoad() && "Expected a load!");
unsigned ImmediateLoadDominator =
std::max(CurrentLoadGroupID, CurrentLoadBarrierGroupID);
@@ -194,10 +194,10 @@ unsigned LSUnit::dispatch(const InstRef &IR) {
}
LSUnit::Status LSUnit::isAvailable(const InstRef &IR) const {
const InstrDesc &Desc = IR.getInstruction()->getDesc();
if (Desc.MayLoad && isLQFull())
const Instruction &IS = *IR.getInstruction();
if (IS.getMayLoad() && isLQFull())
return LSUnit::LSU_LQUEUE_FULL;
if (Desc.MayStore && isSQFull())
if (IS.getMayStore() && isSQFull())
return LSUnit::LSU_SQUEUE_FULL;
return LSUnit::LSU_AVAILABLE;
}
@@ -212,9 +212,9 @@ void LSUnitBase::onInstructionExecuted(const InstRef &IR) {
}
void LSUnitBase::onInstructionRetired(const InstRef &IR) {
const InstrDesc &Desc = IR.getInstruction()->getDesc();
bool IsALoad = Desc.MayLoad;
bool IsAStore = Desc.MayStore;
const Instruction &IS = *IR.getInstruction();
bool IsALoad = IS.getMayLoad();
bool IsAStore = IS.getMayStore();
assert((IsALoad || IsAStore) && "Expected a memory operation!");
if (IsALoad) {


@@ -572,6 +572,7 @@ InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');
LLVM_DEBUG(dbgs() << "\t\tOpcode=" << Opcode << '\n');
// Create a new empty descriptor.
std::unique_ptr<InstrDesc> ID = std::make_unique<InstrDesc>();
@@ -593,13 +594,6 @@ InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
FirstReturnInst = false;
}
ID->MayLoad = MCDesc.mayLoad();
ID->MayStore = MCDesc.mayStore();
ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
ID->BeginGroup = SCDesc.BeginGroup;
ID->EndGroup = SCDesc.EndGroup;
ID->RetireOOO = SCDesc.RetireOOO;
initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
computeMaxLatency(*ID, MCDesc, SCDesc, STI);
@@ -647,6 +641,17 @@ InstrBuilder::createInstruction(const MCInst &MCI) {
std::unique_ptr<Instruction> NewIS =
std::make_unique<Instruction>(D, MCI.getOpcode());
const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
const MCSchedClassDesc &SCDesc =
*STI.getSchedModel().getSchedClassDesc(D.SchedClassID);
NewIS->setMayLoad(MCDesc.mayLoad());
NewIS->setMayStore(MCDesc.mayStore());
NewIS->setHasSideEffects(MCDesc.hasUnmodeledSideEffects());
NewIS->setBeginGroup(SCDesc.BeginGroup);
NewIS->setEndGroup(SCDesc.EndGroup);
NewIS->setRetireOOO(SCDesc.RetireOOO);
// Check if this is a dependency breaking instruction.
APInt Mask;


@@ -78,7 +78,6 @@ bool DispatchStage::canDispatch(const InstRef &IR) const {
Error DispatchStage::dispatch(InstRef IR) {
assert(!CarryOver && "Cannot dispatch another instruction!");
Instruction &IS = *IR.getInstruction();
const InstrDesc &Desc = IS.getDesc();
const unsigned NumMicroOps = IS.getNumMicroOps();
if (NumMicroOps > DispatchWidth) {
assert(AvailableEntries == DispatchWidth);
@@ -91,7 +90,7 @@ Error DispatchStage::dispatch(InstRef IR) {
}
// Check if this instruction ends the dispatch group.
if (Desc.EndGroup)
if (IS.getEndGroup())
AvailableEntries = 0;
// Check if this is an optimizable reg-reg move or an XCHG-like instruction.
@@ -164,7 +163,7 @@ bool DispatchStage::isAvailable(const InstRef &IR) const {
if (Required > AvailableEntries)
return false;
if (Desc.BeginGroup && AvailableEntries != DispatchWidth)
if (Inst.getBeginGroup() && AvailableEntries != DispatchWidth)
return false;
// The dispatch logic doesn't internally buffer instructions. It only accepts


@@ -165,8 +165,8 @@ static void verifyInstructionEliminated(const InstRef &IR) {
// Ensure that instructions eliminated at register renaming stage are in a
// consistent state.
const InstrDesc &Desc = Inst.getDesc();
assert(!Desc.MayLoad && !Desc.MayStore && "Cannot eliminate a memory op!");
assert(!Inst.getMayLoad() && !Inst.getMayStore() &&
"Cannot eliminate a memory op!");
}
#endif


@@ -63,7 +63,6 @@ bool InOrderIssueStage::isAvailable(const InstRef &IR) const {
const Instruction &Inst = *IR.getInstruction();
unsigned NumMicroOps = Inst.getNumMicroOps();
const InstrDesc &Desc = Inst.getDesc();
bool ShouldCarryOver = NumMicroOps > getIssueWidth();
if (Bandwidth < NumMicroOps && !ShouldCarryOver)
@@ -71,7 +70,7 @@ bool InOrderIssueStage::isAvailable(const InstRef &IR) const {
// An instruction with BeginGroup must be the first instruction to be issued
// in a cycle.
if (Desc.BeginGroup && NumIssued != 0)
if (Inst.getBeginGroup() && NumIssued != 0)
return false;
return true;
@@ -140,7 +139,7 @@ bool InOrderIssueStage::canExecute(const InstRef &IR) {
}
if (LastWriteBackCycle) {
if (!IR.getInstruction()->getDesc().RetireOOO) {
if (!IR.getInstruction()->getRetireOOO()) {
unsigned NextWriteBackCycle = findFirstWriteBackCycle(IR);
// Delay the instruction to ensure that writes happen in program order.
if (NextWriteBackCycle < LastWriteBackCycle) {
@@ -254,7 +253,7 @@ llvm::Error InOrderIssueStage::tryIssue(InstRef &IR) {
LLVM_DEBUG(dbgs() << "[N] Carry over #" << IR << " \n");
} else {
NumIssued += NumMicroOps;
Bandwidth = Desc.EndGroup ? 0 : Bandwidth - NumMicroOps;
Bandwidth = IS.getEndGroup() ? 0 : Bandwidth - NumMicroOps;
}
// If the instruction has a latency of 0, we need to handle
@@ -272,7 +271,7 @@ llvm::Error InOrderIssueStage::tryIssue(InstRef &IR) {
IssuedInst.push_back(IR);
if (!IR.getInstruction()->getDesc().RetireOOO)
if (!IR.getInstruction()->getRetireOOO())
LastWriteBackCycle = IS.getCyclesLeft();
return llvm::ErrorSuccess();
@@ -325,7 +324,7 @@ void InOrderIssueStage::updateCarriedOver() {
LLVM_DEBUG(dbgs() << "[N] Carry over (complete) #" << CarriedOver << " \n");
if (CarriedOver.getInstruction()->getDesc().EndGroup)
if (CarriedOver.getInstruction()->getEndGroup())
Bandwidth = 0;
else
Bandwidth -= CarryOver;


@@ -48,23 +48,23 @@ void SchedulerStatistics::onEvent(const HWInstructionEvent &Event) {
} else if (Event.Type == HWInstructionEvent::Dispatched) {
const Instruction &Inst = *Event.IR.getInstruction();
const unsigned Index = Event.IR.getSourceIndex();
if (LQResourceID && Inst.getDesc().MayLoad &&
if (LQResourceID && Inst.getMayLoad() &&
MostRecentLoadDispatched != Index) {
Usage[LQResourceID].SlotsInUse++;
MostRecentLoadDispatched = Index;
}
if (SQResourceID && Inst.getDesc().MayStore &&
if (SQResourceID && Inst.getMayStore() &&
MostRecentStoreDispatched != Index) {
Usage[SQResourceID].SlotsInUse++;
MostRecentStoreDispatched = Index;
}
} else if (Event.Type == HWInstructionEvent::Executed) {
const Instruction &Inst = *Event.IR.getInstruction();
if (LQResourceID && Inst.getDesc().MayLoad) {
if (LQResourceID && Inst.getMayLoad()) {
assert(Usage[LQResourceID].SlotsInUse);
Usage[LQResourceID].SlotsInUse--;
}
if (SQResourceID && Inst.getDesc().MayStore) {
if (SQResourceID && Inst.getMayStore()) {
assert(Usage[SQResourceID].SlotsInUse);
Usage[SQResourceID].SlotsInUse--;
}


@@ -465,6 +465,21 @@ int main(int argc, char **argv) {
const MCSchedModel &SM = STI->getSchedModel();
std::unique_ptr<mca::InstrPostProcess> IPP;
if (!DisableCustomBehaviour) {
// TODO: It may be a good idea to separate CB and IPP so that they can be
// used independently of each other. For example, an extra command-line arg
// --disable-ipp would allow CB and IPP to be toggled without having to
// toggle both of them together.
IPP = std::unique_ptr<mca::InstrPostProcess>(
TheTarget->createInstrPostProcess(*STI, *MCII));
}
if (!IPP) {
// If the target doesn't have its own IPP implemented (or the -disable-cb
// flag is set) then we use the base class (which does nothing).
IPP = std::make_unique<mca::InstrPostProcess>(*STI, *MCII);
}
// Create an instruction builder.
mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get());
@@ -498,16 +513,7 @@ int main(int argc, char **argv) {
ArrayRef<MCInst> Insts = Region->getInstructions();
mca::CodeEmitter CE(*STI, *MAB, *MCE, Insts);
std::unique_ptr<mca::InstrPostProcess> IPP;
if (!DisableCustomBehaviour) {
IPP = std::unique_ptr<mca::InstrPostProcess>(
TheTarget->createInstrPostProcess(*STI, *MCII));
}
if (!IPP)
// If the target doesn't have its own IPP implemented (or the
// -disable-cb flag is set) then we use the base class
// (which does nothing).
IPP = std::make_unique<mca::InstrPostProcess>(*STI, *MCII);
IPP->resetState();
SmallVector<std::unique_ptr<mca::Instruction>> LoweredSequence;
for (const MCInst &MCI : Insts) {
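
Taken together, the llvm-mca.cpp hunks change the per-region flow roughly as follows (a paraphrased sketch of the surrounding driver code, not a verbatim excerpt): the InstrPostProcess is built once, outside the region loop, and resetState() runs before each region is lowered.

// Paraphrased driver flow after this change.
for (const auto &Region : Regions) {              // region loop (elided above)
  ArrayRef<MCInst> Insts = Region->getInstructions();
  IPP->resetState();                              // drop state from the previous region
  SmallVector<std::unique_ptr<mca::Instruction>> LoweredSequence;
  for (const MCInst &MCI : Insts) {
    Expected<std::unique_ptr<mca::Instruction>> Inst = IB.createInstruction(MCI);
    if (!Inst) {
      // error handling elided
      continue;
    }
    // May overwrite MayLoad/MayStore/HasSideEffects/BeginGroup/EndGroup/
    // RetireOOO via the setters added in this commit.
    IPP->postProcessInstruction(Inst.get(), MCI);
    LoweredSequence.emplace_back(std::move(Inst.get()));
  }
  // ... run the simulation pipeline over LoweredSequence ...
}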