Update a few calls to getSubtarget<> to use either getSubtargetImpl,
when we didn't need the cast to the base class, or the cached
subtarget off of the MachineFunction.

llvm-svn: 227176
Eric Christopher 2015-01-27 07:54:39 +00:00
parent 36d9273128
commit 2c63549386
5 changed files with 16 additions and 18 deletions
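
As context for the per-file hunks below, here is a minimal sketch contrasting the two access patterns this commit standardizes on. The sketch is not code from the commit: the helpers wantsAA() and initFromMF() are hypothetical, while the accessors they call (getSubtargetImpl, MachineFunction::getSubtarget, getRegisterInfo, getInstrInfo, useAA) are the ones that appear in the hunks.

// Sketch only; wantsAA() and initFromMF() are hypothetical helpers, not
// code from this commit.
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

// IR-level code that only has a TargetMachine and a Function: ask for the
// base-class subtarget directly instead of calling
// getSubtarget<TargetSubtargetInfo> and taking the address of the result.
static bool wantsAA(const TargetMachine *TM, const Function &F) {
  const TargetSubtargetInfo *ST = TM->getSubtargetImpl(F);
  return ST && ST->useAA();
}

// MachineFunction-level code: reuse the subtarget already cached on the
// MachineFunction rather than re-deriving it from the TargetMachine.
static void initFromMF(const MachineFunction &MF) {
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  (void)TRI;
  (void)TII;
}

The second form is what RegisterCoalescer and ScheduleDAGInstrs switch to below; it avoids the templated cast entirely and keeps every query on the subtarget the MachineFunction was created with.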


@@ -225,7 +225,7 @@ void BasicTTI::getUnrollingPreferences(const Function *F, Loop *L,
   // until someone finds a case where it matters in practice.

   unsigned MaxOps;
-  const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>(F);
+  const TargetSubtargetInfo *ST = TM->getSubtargetImpl(F);
   if (PartialUnrollingThreshold.getNumOccurrences() > 0)
     MaxOps = PartialUnrollingThreshold;
   else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)


@@ -3145,8 +3145,8 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
       SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
   } else if (AddrSinkUsingGEPs ||
              (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
-              TM->getSubtarget<TargetSubtargetInfo>(
-                  MemoryInst->getParent()->getParent()).useAA())) {
+              TM->getSubtargetImpl(*MemoryInst->getParent()->getParent())
+                  ->useAA())) {
     // By default, we use the GEP-based method when AA is used later. This
     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
     DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "


@@ -249,7 +249,7 @@ TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
   substitutePass(&PostRAMachineLICMID, &MachineLICMID);

   // Temporarily disable experimental passes.
-  const TargetSubtargetInfo &ST = TM->getSubtarget<TargetSubtargetInfo>();
+  const TargetSubtargetInfo &ST = *TM->getSubtargetImpl();
   if (!ST.useMachineScheduler())
     disablePass(&MachineSchedulerID);
 }


@@ -2716,15 +2716,14 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
   MF = &fn;
   MRI = &fn.getRegInfo();
   TM = &fn.getTarget();
-  TRI = TM->getSubtargetImpl()->getRegisterInfo();
-  TII = TM->getSubtargetImpl()->getInstrInfo();
+  const TargetSubtargetInfo &STI = fn.getSubtarget();
+  TRI = STI.getRegisterInfo();
+  TII = STI.getInstrInfo();
   LIS = &getAnalysis<LiveIntervals>();
   AA = &getAnalysis<AliasAnalysis>();
   Loops = &getAnalysis<MachineLoopInfo>();

-  const TargetSubtargetInfo &ST = TM->getSubtarget<TargetSubtargetInfo>();
   if (EnableGlobalCopies == cl::BOU_UNSET)
-    JoinGlobalCopies = ST.useMachineScheduler();
+    JoinGlobalCopies = STI.useMachineScheduler();
   else
     JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE);


@@ -51,18 +51,17 @@ static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden,
 ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                      const MachineLoopInfo *mli,
-                                     bool IsPostRAFlag,
-                                     bool RemoveKillFlags,
+                                     bool IsPostRAFlag, bool RemoveKillFlags,
                                      LiveIntervals *lis)
-  : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()), LIS(lis),
-    IsPostRA(IsPostRAFlag), RemoveKillFlags(RemoveKillFlags),
-    CanHandleTerminators(false), FirstDbgValue(nullptr) {
+    : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()), LIS(lis),
+      IsPostRA(IsPostRAFlag), RemoveKillFlags(RemoveKillFlags),
+      CanHandleTerminators(false), FirstDbgValue(nullptr) {
   assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
   DbgValues.clear();

   assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
          "Virtual registers must be removed prior to PostRA scheduling");

-  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+  const TargetSubtargetInfo &ST = mf.getSubtarget();
   SchedModel.init(ST.getSchedModel(), &ST, TII);
 }
@@ -253,7 +252,7 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
   assert(MO.isDef() && "expect physreg def");

   // Ask the target if address-backscheduling is desirable, and if so how much.
-  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+  const TargetSubtargetInfo &ST = MF.getSubtarget();

   for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
        Alias.isValid(); ++Alias) {
@@ -444,7 +443,7 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
       int DefOp = Def->findRegisterDefOperandIdx(Reg);
       dep.setLatency(SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx));

-      const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+      const TargetSubtargetInfo &ST = MF.getSubtarget();
       ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
       SU->addPred(dep);
     }
@@ -743,7 +742,7 @@ void ScheduleDAGInstrs::initSUnits() {
 void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
                                         RegPressureTracker *RPTracker,
                                         PressureDiffs *PDiffs) {
-  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+  const TargetSubtargetInfo &ST = MF.getSubtarget();
   bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
                                                        : ST.useAA();
   AliasAnalysis *AAForDep = UseAA ? AA : nullptr;