diff --git a/llvm/include/llvm/Target/TargetInstrInfo.h b/llvm/include/llvm/Target/TargetInstrInfo.h
index 3db219c4184c..b86e1566a159 100644
--- a/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -21,6 +21,7 @@
 #include "llvm/MC/MCInstrInfo.h"
 #include "llvm/Support/BranchProbability.h"
 #include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 
 namespace llvm {
 
@@ -799,13 +800,15 @@ public:
   /// The new instruction is inserted before MI, and the client is responsible
   /// for removing the old instruction.
   MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                  ArrayRef<unsigned> Ops, int FrameIndex) const;
+                                  ArrayRef<unsigned> Ops, int FrameIndex,
+                                  LiveIntervals *LIS = nullptr) const;
 
   /// Same as the previous version except it allows folding of any load and
   /// store from / to any address, not just from a specific stack slot.
   MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   ArrayRef<unsigned> Ops,
-                                  MachineInstr *LoadMI) const;
+                                  MachineInstr *LoadMI,
+                                  LiveIntervals *LIS = nullptr) const;
 
   /// Return true when there is potentially a faster code sequence
   /// for an instruction chain ending in \p Root. All potential patterns are
@@ -884,7 +887,8 @@ protected:
   /// at InsertPt.
   virtual MachineInstr *foldMemoryOperandImpl(
       MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-      MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+      MachineBasicBlock::iterator InsertPt, int FrameIndex,
+      LiveIntervals *LIS = nullptr) const {
     return nullptr;
   }
 
@@ -895,7 +899,8 @@ protected:
   /// at InsertPt.
   virtual MachineInstr *foldMemoryOperandImpl(
       MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-      MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+      MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
+      LiveIntervals *LIS = nullptr) const {
     return nullptr;
   }
 
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index 33340901816e..6d2fcb9c358d 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -761,8 +761,8 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
   MachineInstrSpan MIS(MI);
 
   MachineInstr *FoldMI =
-      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
-             : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
+      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI, &LIS)
+             : TII.foldMemoryOperand(MI, FoldOps, StackSlot, &LIS);
   if (!FoldMI)
     return false;
 
diff --git a/llvm/lib/CodeGen/LiveRangeEdit.cpp b/llvm/lib/CodeGen/LiveRangeEdit.cpp
index 3ed02f46c0ed..8b355f00255d 100644
--- a/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -205,7 +205,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
   if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
     return false;
 
-  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI);
+  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI, &LIS);
   if (!FoldMI)
     return false;
   DEBUG(dbgs() << " folded: " << *FoldMI);
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 800ad6d1bb4b..6d90f9dd8195 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -497,7 +497,8 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
 /// stream.
 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                  ArrayRef<unsigned> Ops,
-                                                 int FI) const {
+                                                 int FI,
+                                                 LiveIntervals *LIS) const {
   unsigned Flags = 0;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     if (MI->getOperand(Ops[i]).isDef())
@@ -519,7 +520,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
     MBB->insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
   }
 
   if (NewMI) {
@@ -778,7 +779,8 @@ void TargetInstrInfo::genAlternativeCodeSequence(
 /// stack slot.
 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                  ArrayRef<unsigned> Ops,
-                                                 MachineInstr *LoadMI) const {
+                                                 MachineInstr *LoadMI,
+                                                 LiveIntervals *LIS) const {
   assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
@@ -800,7 +802,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
     NewMI = MBB.insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
   }
 
   if (!NewMI) return nullptr;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 4f1a72cf97be..e396c3350940 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2448,7 +2448,8 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
 
 MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+    MachineBasicBlock::iterator InsertPt, int FrameIndex,
+    LiveIntervals *LIS) const {
   // This is a bit of a hack. Consider this instruction:
   //
   //   %vreg0 = COPY %SP; GPR64all:%vreg0
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 32d5c1941542..37a9d41f8451 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -143,7 +143,8 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
+                                      int FrameIndex,
+                                      LiveIntervals *LIS = nullptr) const override;
 
   bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                      MachineBasicBlock *&FBB,
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 3938db256e79..fa824d010651 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -15,6 +15,7 @@
 #include "SystemZInstrBuilder.h"
 #include "SystemZTargetMachine.h"
 #include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 
 using namespace llvm;
@@ -846,31 +847,42 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+    MachineBasicBlock::iterator InsertPt, int FrameIndex,
+    LiveIntervals *LIS) const {
+  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   unsigned Size = MFI->getObjectSize(FrameIndex);
   unsigned Opcode = MI->getOpcode();
 
-// XXX This is an introduction of a CC def and is illegal! Reactivate
-// with a check of liveness of CC reg.
-#if 0
   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
-    if ((Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
+    if (LIS != nullptr &&
+        (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
         isInt<8>(MI->getOperand(2).getImm()) &&
         !MI->getOperand(3).getReg()) {
-      // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
-      MachineInstr *BuiltMI =
-          BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
-                  get(SystemZ::AGSI))
+
+      // Check CC liveness, since the new instruction introduces a dead
+      // def of CC.
+      MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
+      LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
+      ++CCUnit;
+      assert(!CCUnit.isValid() && "CC only has one reg unit.");
+      SlotIndex MISlot =
+          LIS->getSlotIndexes()->getInstructionIndex(*MI).getRegSlot();
+      if (!CCLiveRange.liveAt(MISlot)) {
+        // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
+        MachineInstr *BuiltMI =
+            BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                    get(SystemZ::AGSI))
                 .addFrameIndex(FrameIndex)
                 .addImm(0)
                 .addImm(MI->getOperand(2).getImm());
-      BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
-      return BuiltMI;
+        BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
+        CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
+        return BuiltMI;
+      }
     }
     return nullptr;
   }
-#endif
 
   // All other cases require a single operand.
   if (Ops.size() != 1)
     return nullptr;
@@ -992,7 +1004,8 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
 
 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
+    LiveIntervals *LIS) const {
   return nullptr;
 }
 
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index e995ff10ddcf..b5e4ff487338 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -202,11 +202,13 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
+                                      int FrameIndex,
+                                      LiveIntervals *LIS = nullptr) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      MachineInstr *LoadMI) const override;
+                                      MachineInstr *LoadMI,
+                                      LiveIntervals *LIS = nullptr) const override;
   bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
   bool
   ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 45f3727a705c..f6c11c80855d 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -6081,7 +6081,8 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
 
 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+    MachineBasicBlock::iterator InsertPt, int FrameIndex,
+    LiveIntervals *LIS) const {
   // Check switch flag
   if (NoFusing) return nullptr;
 
@@ -6193,14 +6194,15 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
 
 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
+    LiveIntervals *LIS) const {
   // If loading from a FrameIndex, fold directly from the FrameIndex.
   unsigned NumOps = LoadMI->getDesc().getNumOperands();
   int FrameIndex;
   if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
     if (isNonFoldablePartialRegisterLoad(*LoadMI, *MI, MF))
       return nullptr;
-    return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex);
+    return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
   }
 
   // Check switch flag
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index d72589604ae1..5a82c161b272 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -370,7 +370,8 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
+                                      int FrameIndex,
+                                      LiveIntervals *LIS = nullptr) const override;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
@@ -378,7 +379,8 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      MachineInstr *LoadMI) const override;
+                                      MachineInstr *LoadMI,
+                                      LiveIntervals *LIS = nullptr) const override;
 
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is
diff --git a/llvm/test/CodeGen/SystemZ/int-add-12.ll b/llvm/test/CodeGen/SystemZ/int-add-12.ll
index 96273a6692b9..496650f435c9 100644
--- a/llvm/test/CodeGen/SystemZ/int-add-12.ll
+++ b/llvm/test/CodeGen/SystemZ/int-add-12.ll
@@ -130,7 +130,7 @@ define void @f10(i64 %base, i64 %index) {
 ; Check that adding 127 to a spilled value can use AGSI.
 define void @f11(i64 *%ptr, i32 %sel) {
 ; CHECK-LABEL: f11:
-; _CHECK: agsi {{[0-9]+}}(%r15), 127
+; CHECK: agsi {{[0-9]+}}(%r15), 127
 ; CHECK: br %r14
 entry:
   %val0 = load volatile i64 , i64 *%ptr
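
Note on using the new parameter (illustrative sketch; not part of the patch):
LIS defaults to nullptr in every signature above, so existing callers keep
working and a target may only attempt a fold that introduces a new physreg
def when the analysis was actually passed in. The guard pattern below mirrors
the SystemZ hunk; "MyTarget" and the MYTGT::FLAGS condition-code register are
hypothetical stand-ins, and the fold itself is elided.

  MachineInstr *MyTargetInstrInfo::foldMemoryOperandImpl(
      MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
      MachineBasicBlock::iterator InsertPt, int FrameIndex,
      LiveIntervals *LIS) const {
    // Without LiveIntervals there is no way to prove the flags register is
    // dead at MI, so conservatively decline the fold.
    if (!LIS)
      return nullptr;

    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    // MYTGT::FLAGS is a hypothetical stand-in for the target's CC register.
    MCRegUnitIterator Unit(MYTGT::FLAGS, TRI);
    LiveRange &LR = LIS->getRegUnit(*Unit);
    SlotIndex MISlot =
        LIS->getSlotIndexes()->getInstructionIndex(*MI).getRegSlot();
    if (LR.liveAt(MISlot))
      return nullptr; // Flags register is in use here; folding is unsafe.

    // ... build the replacement instruction, mark its FLAGS def dead, and
    // keep the analysis consistent with:
    //   LR.createDeadDef(MISlot, LIS->getVNInfoAllocator());
    return nullptr; // Placeholder: a real target returns the built MI.
  }

Updating the live range next to the fold (createDeadDef) is what lets later
users of LiveIntervals keep trusting the analysis without a recompute.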