diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 7236bd5717ff..ac3d87c4bd51 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -482,7 +482,7 @@ namespace ISD {
    // Operand #0 : Input chain.
    // Operand #1 : an ExternalSymbolSDNode with a pointer to the asm string.
    // Operand #2 : an MDNodeSDNode with the !srcloc metadata.
-   // Operand #3 : IsAlignStack bit.
+   // Operand #3 : HasSideEffects, IsAlignStack bits.
    // After this, it is followed by a list of operands with this format:
    //   ConstantSDNode: Flags that encode whether it is a mem or not, the
    //                   # of operands that follow, etc.  See InlineAsm.h.
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index fc1e211af1db..82c5332ccd9f 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -237,6 +237,7 @@ public:
   bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
   bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
   bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
+  bool isStackAligningInlineAsm() const;
   bool isInsertSubreg() const {
     return getOpcode() == TargetOpcode::INSERT_SUBREG;
   }
@@ -432,6 +433,15 @@ public:
   /// return 0.
   unsigned isConstantValuePHI() const;
 
+  /// hasUnmodeledSideEffects - Return true if this instruction has side
+  /// effects that are not modeled by mayLoad / mayStore, etc.
+  /// For all instructions, the property is encoded in TargetInstrDesc::Flags
+  /// (see TargetInstrDesc::hasUnmodeledSideEffects()). The only exception is
+  /// the INLINEASM instruction, in which case the side effect property is
+  /// encoded in one of its operands (see InlineAsm::Extra_HasSideEffects).
+  ///
+  bool hasUnmodeledSideEffects() const;
+
   /// allDefsAreDead - Return true if all the defs of this instruction are dead.
   ///
   bool allDefsAreDead() const;
diff --git a/llvm/include/llvm/InlineAsm.h b/llvm/include/llvm/InlineAsm.h
index 4ca012e97464..ed8f0f7f615e 100644
--- a/llvm/include/llvm/InlineAsm.h
+++ b/llvm/include/llvm/InlineAsm.h
@@ -190,8 +190,15 @@ public:
     Op_InputChain = 0,
     Op_AsmString = 1,
     Op_MDNode = 2,
-    Op_IsAlignStack = 3,
+    Op_ExtraInfo = 3,    // HasSideEffects, IsAlignStack
     Op_FirstOperand = 4,
+
+    MIOp_AsmString = 0,
+    MIOp_ExtraInfo = 1,    // HasSideEffects, IsAlignStack
+    MIOp_FirstOperand = 2,
+
+    Extra_HasSideEffects = 1,
+    Extra_IsAlignStack = 2,
 
     Kind_RegUse = 1,
     Kind_RegDef = 2,
diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
index e88a09eecc6f..1bc00b610e55 100644
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -418,6 +418,7 @@ def INLINEASM : Instruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "";
+  let neverHasSideEffects = 1;  // Note: side effects are encoded in an operand.
 }
 def PROLOG_LABEL : Instruction {
   let OutOperandList = (outs);
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index ff9ecf2dbd07..c6166e2365a5 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -304,7 +304,7 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
         // Okay, we finally have a value number. Ask the target to print this
         // operand!
        if (CurVariant == -1 || CurVariant == AsmPrinterVariant) {
-          unsigned OpNo = 2;
+          unsigned OpNo = InlineAsm::MIOp_FirstOperand;
           bool Error = false;
diff --git a/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp b/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
index 786de1b6a0be..32040e580bed 100644
--- a/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -54,6 +54,12 @@ FunctionPass *llvm::createDeadMachineInstructionElimPass() {
 }
 
 bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
+  // Technically speaking, inline asm without side effects and no defs can
+  // still be deleted. But there is so much bad inline asm code out there,
+  // we should let them be.
+  if (MI->isInlineAsm())
+    return false;
+
   // Don't delete instructions with side effects.
   bool SawStore = false;
   if (!MI->isSafeToMove(TII, 0, SawStore) && !MI->isPHI())
diff --git a/llvm/lib/CodeGen/MachineCSE.cpp b/llvm/lib/CodeGen/MachineCSE.cpp
index bb1db8665852..3e54784278c2 100644
--- a/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/llvm/lib/CodeGen/MachineCSE.cpp
@@ -262,7 +262,7 @@ bool MachineCSE::isCSECandidate(MachineInstr *MI) {
   // Ignore stuff that we obviously can't move.
   const TargetInstrDesc &TID = MI->getDesc();
   if (TID.mayStore() || TID.isCall() || TID.isTerminator() ||
-      TID.hasUnmodeledSideEffects())
+      MI->hasUnmodeledSideEffects())
     return false;
 
   if (TID.mayLoad()) {
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index ccb2ffbab1d4..3de171687248 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -826,6 +826,14 @@ unsigned MachineInstr::getNumExplicitOperands() const {
   return NumOperands;
 }
 
+bool MachineInstr::isStackAligningInlineAsm() const {
+  if (isInlineAsm()) {
+    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
+      return true;
+  }
+  return false;
+}
 /// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
 /// the specific register or -1 if it is not found. It further tightens
@@ -925,14 +933,15 @@ int MachineInstr::findFirstPredOperandIdx() const {
 bool MachineInstr::
 isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
   if (isInlineAsm()) {
-    assert(DefOpIdx >= 3);
+    assert(DefOpIdx > InlineAsm::MIOp_FirstOperand);
     const MachineOperand &MO = getOperand(DefOpIdx);
     if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0)
       return false;
     // Determine the actual operand index that corresponds to this index.
     unsigned DefNo = 0;
     unsigned DefPart = 0;
-    for (unsigned i = 2, e = getNumOperands(); i < e; ) {
+    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands();
+         i < e; ) {
       const MachineOperand &FMO = getOperand(i);
       // After the normal asm operands there may be additional imp-def regs.
      if (!FMO.isImm())
@@ -947,7 +956,8 @@ isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
      }
      ++DefNo;
    }
-    for (unsigned i = 2, e = getNumOperands(); i != e; ++i) {
+    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands();
+         i != e; ++i) {
      const MachineOperand &FMO = getOperand(i);
      if (!FMO.isImm())
        continue;
@@ -990,7 +1000,8 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
 
    // Find the flag operand corresponding to UseOpIdx
    unsigned FlagIdx, NumOps=0;
-    for (FlagIdx = 2; FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
+    for (FlagIdx = InlineAsm::MIOp_FirstOperand;
+         FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
      const MachineOperand &UFMO = getOperand(FlagIdx);
      // After the normal asm operands there may be additional imp-def regs.
      if (!UFMO.isImm())
@@ -1008,9 +1019,9 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
    if (!DefOpIdx)
      return true;
 
-    unsigned DefIdx = 2;
+    unsigned DefIdx = InlineAsm::MIOp_FirstOperand;
    // Remember to adjust the index. First operand is asm string, second is
-    // the AlignStack bit, then there is a flag for each.
+    // the HasSideEffects and IsAlignStack bits, then there is a flag for each.
    while (DefNo) {
      const MachineOperand &FMO = getOperand(DefIdx);
      assert(FMO.isImm());
@@ -1117,7 +1128,7 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
  }
 
  if (isLabel() || isDebugValue() ||
-      TID->isTerminator() || TID->hasUnmodeledSideEffects())
+      TID->isTerminator() || hasUnmodeledSideEffects())
    return false;
 
  // See if this instruction does a load. If so, we have to guarantee that the
@@ -1168,7 +1179,7 @@ bool MachineInstr::hasVolatileMemoryRef() const {
  if (!TID->mayStore() &&
      !TID->mayLoad() &&
      !TID->isCall() &&
-      !TID->hasUnmodeledSideEffects())
+      !hasUnmodeledSideEffects())
    return false;
 
  // Otherwise, if the instruction has no memory reference information,
@@ -1242,6 +1253,18 @@ unsigned MachineInstr::isConstantValuePHI() const {
  return Reg;
 }
 
+bool MachineInstr::hasUnmodeledSideEffects() const {
+  if (getDesc().hasUnmodeledSideEffects())
+    return true;
+  if (isInlineAsm()) {
+    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
+      return true;
+  }
+
+  return false;
+}
+
 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
 ///
 bool MachineInstr::allDefsAreDead() const {
@@ -1329,6 +1352,24 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
  // Print the rest of the operands.
  bool OmittedAnyCallClobbers = false;
  bool FirstOp = true;
+
+  if (isInlineAsm()) {
+    // Print asm string.
+    OS << " ";
+    getOperand(InlineAsm::MIOp_AsmString).print(OS, TM);
+
+    // Print HasSideEffects, IsAlignStack
+    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
+      OS << " [sideeffect]";
+    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
+      OS << " [alignstack]";
+
+    StartOp = InlineAsm::MIOp_FirstOperand;
+    FirstOp = false;
+  }
+
+
  for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 1ddfe8b7c230..54466efd5a7c 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -338,7 +338,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
 
      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
-          MI->getDesc().hasUnmodeledSideEffects())
+          MI->hasUnmodeledSideEffects())
        continue;
 
      if (MI->getDesc().isCompare()) {
diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 5ba7086eef62..699c9b98cc85 100644
--- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -21,6 +21,7 @@
 #define DEBUG_TYPE "pei"
 #include "PrologEpilogInserter.h"
+#include "llvm/InlineAsm.h"
 #include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
@@ -172,7 +173,8 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm's need a stack frame, as indicated by operand 1.
-        if (I->getOperand(1).getImm())
+        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }
diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index 5690eb4d9b38..f17023eabb72 100644
--- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -410,7 +410,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
    // produce more precise dependence information.
 #define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
-    if (TID.isCall() || TID.hasUnmodeledSideEffects() ||
+    if (TID.isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 977cd63844b1..d7bf6c16b70a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -821,11 +821,11 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
    const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
    MI->addOperand(MachineOperand::CreateES(AsmStr));
 
-    // Add the isAlignStack bit.
-    int64_t isAlignStack =
-      cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_IsAlignStack))->
+    // Add the HasSideEffects and IsAlignStack bits.
+    int64_t ExtraInfo =
+      cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
        getZExtValue();
-    MI->addOperand(MachineOperand::CreateImm(isAlignStack));
+    MI->addOperand(MachineOperand::CreateImm(ExtraInfo));
 
    // Add all of the operand registers to the instruction.
    for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 2167523f2344..2869bd8a3084 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5738,9 +5738,14 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
  const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
  AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
 
-  // Remember the AlignStack bit as operand 3.
-  AsmNodeOperands.push_back(DAG.getTargetConstant(IA->isAlignStack() ? 1 : 0,
-                                                  MVT::i1));
+  // Remember the HasSideEffects and IsAlignStack bits as operand 3.
+  unsigned ExtraInfo = 0;
+  if (IA->hasSideEffects())
+    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
+  if (IA->isAlignStack())
+    ExtraInfo |= InlineAsm::Extra_IsAlignStack;
+  AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
+                                                  TLI.getPointerTy()));
 
  // Loop over all of the inputs, copying the operand values into the
  // appropriate registers and processing the output regs.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 4cc276101cb0..279478f4a91d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -380,10 +380,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
           II = MBB->begin(), IE = MBB->end(); II != IE; ++II) {
        const TargetInstrDesc &TID = TM.getInstrInfo()->get(II->getOpcode());
 
-        // Operand 1 of an inline asm instruction indicates whether the asm
-        // needs stack or not.
-        if ((II->isInlineAsm() && II->getOperand(1).getImm()) ||
-            (TID.isCall() && !TID.isReturn())) {
+        if ((TID.isCall() && !TID.isReturn()) ||
+            II->isStackAligningInlineAsm()) {
          MFI->setHasCalls(true);
          goto done;
        }
@@ -1283,7 +1281,7 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
  Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
  Ops.push_back(InOps[InlineAsm::Op_AsmString]);  // 1
  Ops.push_back(InOps[InlineAsm::Op_MDNode]);     // 2, !srcloc
-  Ops.push_back(InOps[InlineAsm::Op_IsAlignStack]);  // 3
+  Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]);  // 3 (SideEffect, AlignStack)
 
  unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
  if (InOps[e-1].getValueType() == MVT::Glue)
diff --git a/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp b/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
index 28b973b16eb5..5fc7f3230798 100644
--- a/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -329,8 +329,13 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
  const TargetInstrDesc &TID = MI->getDesc();
 
  // Avoid instructions obviously unsafe for remat.
-  if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable() ||
-      TID.mayStore())
+  if (TID.isNotDuplicable() || TID.mayStore() ||
+      MI->hasUnmodeledSideEffects())
+    return false;
+
+  // Don't remat inline asm. We have no idea how expensive it is
+  // even if it's side-effect free.
+  if (MI->isInlineAsm())
    return false;
 
  // Avoid instructions which load from potentially varying memory.
diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index 0d99585acdaf..693fbb68b3aa 100644
--- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -743,7 +743,7 @@ static bool isSafeToDelete(MachineInstr *MI,
  const TargetInstrDesc &TID = MI->getDesc();
  if (TID.mayStore() || TID.isCall())
    return false;
-  if (TID.isTerminator() || TID.hasUnmodeledSideEffects())
+  if (TID.isTerminator() || MI->hasUnmodeledSideEffects())
    return false;
 
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
diff --git a/llvm/lib/CodeGen/VirtRegRewriter.cpp b/llvm/lib/CodeGen/VirtRegRewriter.cpp
index d04b9285bab9..87781067ef8a 100644
--- a/llvm/lib/CodeGen/VirtRegRewriter.cpp
+++ b/llvm/lib/CodeGen/VirtRegRewriter.cpp
@@ -1620,9 +1620,16 @@ static bool isSafeToDelete(MachineInstr &MI) {
  const TargetInstrDesc &TID = MI.getDesc();
  if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
      TID.isCall() || TID.isBarrier() || TID.isReturn() ||
-      TID.hasUnmodeledSideEffects() ||
-      MI.isLabel() || MI.isDebugValue())
+      MI.isLabel() || MI.isDebugValue() ||
+      MI.hasUnmodeledSideEffects())
    return false;
+
+  // Technically speaking, inline asm without side effects and no defs can
+  // still be deleted. But there is so much bad inline asm code out there,
+  // we should let them be.
+  if (MI.isInlineAsm())
+    return false;
+
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.getReg())
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index f2d705f0ec5a..2eca4b124fa2 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1467,7 +1467,7 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
    if (I->isDebugValue() || MemOps.count(&*I))
      continue;
    const TargetInstrDesc &TID = I->getDesc();
-    if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects())
+    if (TID.isCall() || TID.isTerminator() || I->hasUnmodeledSideEffects())
      return false;
    if (isLd && TID.mayStore())
      return false;
diff --git a/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp b/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
index f38f90b8f80f..4399ee280098 100644
--- a/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
+++ b/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
@@ -187,9 +187,8 @@ static bool isDelayFiller(MachineBasicBlock &MBB,
  return (brdesc.hasDelaySlot());
 }
 
-static bool hasUnknownSideEffects(MachineBasicBlock::iterator &I,
-                                  TargetInstrDesc &desc) {
-  if (!desc.hasUnmodeledSideEffects())
+static bool hasUnknownSideEffects(MachineBasicBlock::iterator &I) {
+  if (!I->hasUnmodeledSideEffects())
    return false;
 
  unsigned op = I->getOpcode();
@@ -215,7 +214,7 @@ findDelayInstr(MachineBasicBlock &MBB,MachineBasicBlock::iterator slot) {
    TargetInstrDesc desc = I->getDesc();
    if (desc.hasDelaySlot() || desc.isBranch() || isDelayFiller(MBB,I) ||
        desc.isCall() || desc.isReturn() || desc.isBarrier() ||
-        hasUnknownSideEffects(I,desc))
+        hasUnknownSideEffects(I))
      break;
 
    if (hasImmInstruction(I) || delayHasHazard(I,slot))
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 60c9096d5180..f29d127c85d1 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1036,8 +1036,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
      }
 
      const TargetInstrDesc &TID = MI.getDesc();
-      if (TID.hasUnmodeledSideEffects() ||
-          TID.hasImplicitDefOfPhysReg(X86::EFLAGS))
+      if (TID.hasImplicitDefOfPhysReg(X86::EFLAGS) ||
+          MI.hasUnmodeledSideEffects())
        break;
    }
diff --git a/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll b/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
index 3c64fe45c997..86e50c98bfdb 100644
--- a/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
+++ b/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
@@ -15,14 +15,16 @@
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin8"
 
-@x = common global i32 0        ; <i32*> [#uses=1]
+@x = common global i32 0
 
 define i32 @aci(i32* %pw) nounwind {
 entry:
-  %0 = load i32* @x, align 4        ; <i32> [#uses=1]
-  %asmtmp = tail call { i32, i32 } asm "movl $0, %eax\0A\090:\0A\09test %eax, %eax\0A\09je 1f\0A\09movl %eax, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{ax},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind        ; <{ i32, i32 }> [#uses=0]
-  %asmtmp2 = tail call { i32, i32 } asm "movl $0, %edx\0A\090:\0A\09test %edx, %edx\0A\09je 1f\0A\09movl %edx, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{dx},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind        ; <{ i32, i32 }> [#uses=1]
-  %asmresult3 = extractvalue { i32, i32 } %asmtmp2, 0        ; <i32> [#uses=1]
-  %1 = add i32 %asmresult3, %0        ; <i32> [#uses=1]
-  ret i32 %1
+  %0 = load i32* @x, align 4
+  %asmtmp = tail call { i32, i32 } asm "movl $0, %eax\0A\090:\0A\09test %eax, %eax\0A\09je 1f\0A\09movl %eax, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{ax},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind
+  %asmtmp2 = tail call { i32, i32 } asm "movl $0, %edx\0A\090:\0A\09test %edx, %edx\0A\09je 1f\0A\09movl %edx, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{dx},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind
+  %asmresult2 = extractvalue { i32, i32 } %asmtmp, 0
+  %asmresult3 = extractvalue { i32, i32 } %asmtmp2, 0
+  %1 = add i32 %asmresult2, %asmresult3
+  %2 = add i32 %0, %1
+  ret i32 %2
 }
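
For reference, below is a minimal standalone model of the ExtraInfo bit-packing this patch introduces, so the encoding can be seen in isolation. Only the Extra_* values mirror llvm::InlineAsm above; packExtraInfo() stands in for what SelectionDAGBuilder::visitInlineAsm now builds as operand #3 of the INLINEASM node, and the two query helpers stand in for MachineInstr::hasUnmodeledSideEffects() and isStackAligningInlineAsm(). Every other name is illustrative, not LLVM API.

// extra_info_model.cpp -- illustrative sketch, not LLVM code.
#include <cstdint>
#include <iostream>

// Mirrors InlineAsm::Extra_HasSideEffects / Extra_IsAlignStack from the patch.
enum : uint64_t {
  Extra_HasSideEffects = 1,  // asm was marked `sideeffect` in the IR
  Extra_IsAlignStack = 2     // asm was marked `alignstack` in the IR
};

// Packing: both flags are OR'ed into the single ExtraInfo immediate that
// becomes operand #3 of the INLINEASM SDNode (operand #1 at the MI level).
uint64_t packExtraInfo(bool HasSideEffects, bool IsAlignStack) {
  uint64_t ExtraInfo = 0;
  if (HasSideEffects)
    ExtraInfo |= Extra_HasSideEffects;
  if (IsAlignStack)
    ExtraInfo |= Extra_IsAlignStack;
  return ExtraInfo;
}

// Decoding: single bit tests, as the new MachineInstr queries perform.
bool hasSideEffects(uint64_t ExtraInfo) {
  return (ExtraInfo & Extra_HasSideEffects) != 0;
}
bool isStackAligning(uint64_t ExtraInfo) {
  return (ExtraInfo & Extra_IsAlignStack) != 0;
}

int main() {
  uint64_t ExtraInfo = packExtraInfo(/*HasSideEffects=*/true,
                                     /*IsAlignStack=*/false);
  std::cout << "sideeffect=" << hasSideEffects(ExtraInfo)
            << " alignstack=" << isStackAligning(ExtraInfo) << '\n';  // 1 0
  return 0;
}

Because both flags live in one immediate operand, a pass can test side effects with a single operand read; that is why the call sites above switch from TID.hasUnmodeledSideEffects() to MI->hasUnmodeledSideEffects(), which falls back to the operand check only for INLINEASM.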