Cache the lookup of TargetLowering in the atomic expand pass.

llvm-svn: 227121
Eric Christopher 2015-01-26 19:45:40 +00:00
parent 3de4971a7a
commit b11a1b7b2c
1 changed file with 17 additions and 31 deletions
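The shape of the change, distilled: instead of chasing TM->getSubtargetImpl()->getTargetLowering() at every call site, runOnFunction() now performs the lookup once and caches the result in a new TLI member. A minimal sketch of the pattern follows; the types here are stand-ins that mirror LLVM's names, not the real classes.

    // Mock sketch of the refactor, not the real LLVM API.
    struct TargetLowering {
      bool hasLoadLinkedStoreConditional() const { return true; }
    };
    struct Subtarget {
      TargetLowering TL;
      const TargetLowering *getTargetLowering() const { return &TL; }
    };
    struct TargetMachine {
      Subtarget ST;
      const Subtarget *getSubtargetImpl() const { return &ST; }
    };

    class AtomicExpandSketch {
      const TargetMachine *TM;
      const TargetLowering *TLI; // cached once per function, not per use

    public:
      explicit AtomicExpandSketch(const TargetMachine *TM)
          : TM(TM), TLI(nullptr) {}

      bool runOnFunction() {
        if (!TM)
          return false;
        // One lookup here replaces a three-call chain at every use below.
        TLI = TM->getSubtargetImpl()->getTargetLowering();
        return TLI->hasLoadLinkedStoreConditional();
      }
    };

    int main() {
      TargetMachine TM;
      return AtomicExpandSketch(&TM).runOnFunction() ? 0 : 1;
    }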


@@ -31,10 +31,11 @@ using namespace llvm;
 namespace {
   class AtomicExpand: public FunctionPass {
     const TargetMachine *TM;
+    const TargetLowering *TLI;
   public:
     static char ID; // Pass identification, replacement for typeid
     explicit AtomicExpand(const TargetMachine *TM = nullptr)
-      : FunctionPass(ID), TM(TM) {
+      : FunctionPass(ID), TM(TM), TLI(nullptr) {
       initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
     }
@@ -69,7 +70,7 @@ FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
 bool AtomicExpand::runOnFunction(Function &F) {
   if (!TM || !TM->getSubtargetImpl()->enableAtomicExpand())
     return false;
-  auto TargetLowering = TM->getSubtargetImpl()->getTargetLowering();
+  TLI = TM->getSubtargetImpl()->getTargetLowering();
 
   SmallVector<Instruction *, 1> AtomicInsts;
@@ -91,7 +92,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
     auto FenceOrdering = Monotonic;
     bool IsStore, IsLoad;
-    if (TargetLowering->getInsertFencesForAtomic()) {
+    if (TLI->getInsertFencesForAtomic()) {
       if (LI && isAtLeastAcquire(LI->getOrdering())) {
         FenceOrdering = LI->getOrdering();
         LI->setOrdering(Monotonic);
@@ -107,7 +108,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
         FenceOrdering = RMWI->getOrdering();
         RMWI->setOrdering(Monotonic);
         IsStore = IsLoad = true;
-      } else if (CASI && !TargetLowering->hasLoadLinkedStoreConditional() &&
+      } else if (CASI && !TLI->hasLoadLinkedStoreConditional() &&
                  (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                   isAtLeastAcquire(CASI->getSuccessOrdering()))) {
         // If a compare and swap is lowered to LL/SC, we can do smarter fence
@@ -125,20 +126,19 @@ bool AtomicExpand::runOnFunction(Function &F) {
       }
     }
 
-    if (LI && TargetLowering->shouldExpandAtomicLoadInIR(LI)) {
+    if (LI && TLI->shouldExpandAtomicLoadInIR(LI)) {
       MadeChange |= expandAtomicLoad(LI);
-    } else if (SI && TargetLowering->shouldExpandAtomicStoreInIR(SI)) {
+    } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
       MadeChange |= expandAtomicStore(SI);
     } else if (RMWI) {
       // There are two different ways of expanding RMW instructions:
       // - into a load if it is idempotent
       // - into a Cmpxchg/LL-SC loop otherwise
       // we try them in that order.
-      MadeChange |= (isIdempotentRMW(RMWI) &&
-                     simplifyIdempotentRMW(RMWI)) ||
-                    (TargetLowering->shouldExpandAtomicRMWInIR(RMWI) &&
-                     expandAtomicRMW(RMWI));
-    } else if (CASI && TargetLowering->hasLoadLinkedStoreConditional()) {
+      MadeChange |=
+          (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) ||
+          (TLI->shouldExpandAtomicRMWInIR(RMWI) && expandAtomicRMW(RMWI));
+    } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
       MadeChange |= expandAtomicCmpXchg(CASI);
     }
   }
@@ -149,13 +149,9 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                          bool IsStore, bool IsLoad) {
   IRBuilder<> Builder(I);
-  auto LeadingFence =
-      TM->getSubtargetImpl()->getTargetLowering()->emitLeadingFence(
-          Builder, Order, IsStore, IsLoad);
+  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);
 
-  auto TrailingFence =
-      TM->getSubtargetImpl()->getTargetLowering()->emitTrailingFence(
-          Builder, Order, IsStore, IsLoad);
+  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
   // The trailing fence is emitted before the instruction instead of after
   // because there is no easy way of setting Builder insertion point after
   // an instruction. So we must erase it from the BB, and insert it back
@@ -171,16 +167,13 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
 }
 
 bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
-  if (TM->getSubtargetImpl()
-          ->getTargetLowering()
-          ->hasLoadLinkedStoreConditional())
+  if (TLI->hasLoadLinkedStoreConditional())
     return expandAtomicLoadToLL(LI);
   else
     return expandAtomicLoadToCmpXchg(LI);
 }
 
 bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
   IRBuilder<> Builder(LI);
 
   // On some architectures, load-linked instructions are atomic for larger
@@ -231,9 +224,7 @@ bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
 }
 
 bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
-  if (TM->getSubtargetImpl()
-          ->getTargetLowering()
-          ->hasLoadLinkedStoreConditional())
+  if (TLI->hasLoadLinkedStoreConditional())
     return expandAtomicRMWToLLSC(AI);
   else
     return expandAtomicRMWToCmpXchg(AI);
@@ -277,7 +268,6 @@ static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
 }
 
 bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
   AtomicOrdering MemOpOrder = AI->getOrdering();
   Value *Addr = AI->getPointerOperand();
   BasicBlock *BB = AI->getParent();
@@ -397,7 +387,6 @@ bool AtomicExpand::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI) {
 }
 
 bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
   Value *Addr = CI->getPointerOperand();
@@ -551,13 +540,10 @@ bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
 }
 
 bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
-
   if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
     if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
      expandAtomicLoad(ResultingLoad);
     return true;
   }
   return false;
 }
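A note on the fence-bracketing path that several of these hunks touch: when getInsertFencesForAtomic() is set, the pass demotes the atomic operation to Monotonic ordering and brackets it with explicit fences that carry the original ordering. The same idea expressed in standalone C++11 with std::atomic (illustrative only; this is not the pass's output, and "relaxed" plays the role of IR's "monotonic"):

    #include <atomic>
    #include <cassert>
    #include <thread>

    std::atomic<int> Data{0};
    std::atomic<bool> Ready{false};

    void producer() {
      Data.store(42, std::memory_order_relaxed);
      // Leading fence carries the original release ordering...
      std::atomic_thread_fence(std::memory_order_release);
      // ...so the store itself is demoted to relaxed ("monotonic").
      Ready.store(true, std::memory_order_relaxed);
    }

    void consumer() {
      // The demoted load spins relaxed...
      while (!Ready.load(std::memory_order_relaxed)) {
      }
      // ...and a trailing fence restores the original acquire ordering.
      std::atomic_thread_fence(std::memory_order_acquire);
      assert(Data.load(std::memory_order_relaxed) == 42);
    }

    int main() {
      std::thread T1(producer), T2(consumer);
      T1.join();
      T2.join();
      return 0;
    }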
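Likewise, the idempotent-RMW shortcut exercised by simplifyIdempotentRMW(): an RMW such as or with 0 or and with ~0 writes back the value it read, so a target may lower it to a fenced load via lowerIdempotentRMWIntoFencedLoad(). A small standalone illustration (plain C++, not the pass itself) of why such an operation is observationally a load:

    #include <atomic>
    #include <cassert>

    int main() {
      std::atomic<unsigned> X{42};
      // 'or 0' is idempotent: it stores back exactly the value it read...
      unsigned Seen = X.fetch_or(0u);
      // ...so it observes what an atomic load would, and X is unchanged.
      assert(Seen == 42u);
      assert(X.load() == 42u);
      return 0;
    }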