diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index 0c834cd50e7f..96cfce5b84df 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -278,11 +278,6 @@ namespace llvm {
   /// MachineSinking - This pass performs sinking on machine instructions.
   extern char &MachineSinkingID;
 
-  /// MachineCopyPropagationPreRegRewrite - This pass performs copy propagation
-  /// on machine instructions after register allocation but before virtual
-  /// register re-writing.
-  extern char &MachineCopyPropagationPreRegRewriteID;
-
   /// MachineCopyPropagation - This pass performs copy propagation on
   /// machine instructions.
   extern char &MachineCopyPropagationID;
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index 3e34b59d2967..39ac4649b70d 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -233,7 +233,6 @@ void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
 void initializeMachineCSEPass(PassRegistry&);
 void initializeMachineCombinerPass(PassRegistry&);
 void initializeMachineCopyPropagationPass(PassRegistry&);
-void initializeMachineCopyPropagationPreRegRewritePass(PassRegistry&);
 void initializeMachineDominanceFrontierPass(PassRegistry&);
 void initializeMachineDominatorTreePass(PassRegistry&);
 void initializeMachineFunctionPrinterPassPass(PassRegistry&);
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index c92b45a752ee..b7fd45a3f6a6 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -54,7 +54,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializeMachineCSEPass(Registry);
   initializeMachineCombinerPass(Registry);
   initializeMachineCopyPropagationPass(Registry);
-  initializeMachineCopyPropagationPreRegRewritePass(Registry);
   initializeMachineDominatorTreePass(Registry);
   initializeMachineFunctionPrinterPassPass(Registry);
   initializeMachineLICMPass(Registry);
diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
index 39a5b9ff6ed7..7d5a68192e6b 100644
--- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -7,62 +7,18 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This is a simple MachineInstr-level copy forwarding pass. It may be run at
-// two places in the codegen pipeline:
-//   - After register allocation but before virtual registers have been remapped
-//     to physical registers.
-//   - After physical register remapping.
-//
-// The optimizations done vary slightly based on whether virtual registers are
-// still present. In both cases, this pass forwards the source of COPYs to the
-// users of their destinations when doing so is legal. For example:
-//
-//   %vreg1 = COPY %vreg0
-//   ...
-//   ... = OP %vreg1
-//
-// If
-//   - the physical register assigned to %vreg0 has not been clobbered by the
-//     time of the use of %vreg1
-//   - the register class constraints are satisfied
-//   - the COPY def is the only value that reaches OP
-// then this pass replaces the above with:
-//
-//   %vreg1 = COPY %vreg0
-//   ...
-//   ... = OP %vreg0
-//
-// and updates the relevant state required by VirtRegMap (e.g. LiveIntervals).
-// COPYs whose LiveIntervals become dead as a result of this forwarding (i.e. if
-// all uses of %vreg1 are changed to %vreg0) are removed.
-//
-// When being run with only physical registers, this pass will also remove some
-// redundant COPYs. For example:
-//
-//   %R1 = COPY %R0
-//   ...  // No clobber of %R1
-//   %R0 = COPY %R1 <<< Removed
-//
-// or
-//
-//   %R1 = COPY %R0
-//   ...  // No clobber of %R0
-//   %R1 = COPY %R0 <<< Removed
+// This is an extremely simple MachineInstr-level copy propagation pass.
 //
 //===----------------------------------------------------------------------===//
 
-#include "LiveDebugVariables.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/LiveRangeEdit.h"
-#include "llvm/CodeGen/LiveStackAnalysis.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/VirtRegMap.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
@@ -74,48 +30,24 @@ using namespace llvm;
 
 #define DEBUG_TYPE "machine-cp"
 
 STATISTIC(NumDeletes, "Number of dead copies deleted");
-STATISTIC(NumCopyForwards, "Number of copy uses forwarded");
 
 namespace {
   typedef SmallVector<unsigned, 4> RegList;
   typedef DenseMap<unsigned, RegList> SourceMap;
   typedef DenseMap<unsigned, MachineInstr *> Reg2MIMap;
 
-  class MachineCopyPropagation : public MachineFunctionPass,
-                                 private LiveRangeEdit::Delegate {
+  class MachineCopyPropagation : public MachineFunctionPass {
    const TargetRegisterInfo *TRI;
    const TargetInstrInfo *TII;
-    MachineRegisterInfo *MRI;
-    MachineFunction *MF;
-    SlotIndexes *Indexes;
-    LiveIntervals *LIS;
-    const VirtRegMap *VRM;
-    // True if this pass being run before virtual registers are remapped to
-    // physical ones.
-    bool PreRegRewrite;
-    bool NoSubRegLiveness;
-
-  protected:
-    MachineCopyPropagation(char &ID, bool PreRegRewrite)
-        : MachineFunctionPass(ID), PreRegRewrite(PreRegRewrite) {}
+    const MachineRegisterInfo *MRI;
 
   public:
    static char ID; // Pass identification, replacement for typeid
 
-    MachineCopyPropagation() : MachineCopyPropagation(ID, false) {
+    MachineCopyPropagation() : MachineFunctionPass(ID) {
      initializeMachineCopyPropagationPass(*PassRegistry::getPassRegistry());
    }
 
    void getAnalysisUsage(AnalysisUsage &AU) const override {
-      if (PreRegRewrite) {
-        AU.addRequired<SlotIndexes>();
-        AU.addPreserved<SlotIndexes>();
-        AU.addRequired<LiveIntervals>();
-        AU.addPreserved<LiveIntervals>();
-        AU.addRequired<VirtRegMap>();
-        AU.addPreserved<VirtRegMap>();
-        AU.addPreserved<LiveDebugVariables>();
-        AU.addPreserved<LiveStacks>();
-      }
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
@@ -123,10 +55,6 @@ namespace {
    bool runOnMachineFunction(MachineFunction &MF) override;
 
    MachineFunctionProperties getRequiredProperties() const override {
-      if (PreRegRewrite)
-        return MachineFunctionProperties()
-            .set(MachineFunctionProperties::Property::NoPHIs)
-            .set(MachineFunctionProperties::Property::TracksLiveness);
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }
@@ -136,28 +64,6 @@ namespace {
    void ReadRegister(unsigned Reg);
    void CopyPropagateBlock(MachineBasicBlock &MBB);
    bool eraseIfRedundant(MachineInstr &Copy, unsigned Src, unsigned Def);
-    unsigned getPhysReg(unsigned Reg, unsigned SubReg);
-    unsigned getPhysReg(const MachineOperand &Opnd) {
-      return getPhysReg(Opnd.getReg(), Opnd.getSubReg());
-    }
-    unsigned getFullPhysReg(const MachineOperand &Opnd) {
-      return getPhysReg(Opnd.getReg(), 0);
-    }
-    void forwardUses(MachineInstr &MI);
-    bool isForwardableRegClassCopy(const MachineInstr &Copy,
-                                   const MachineInstr &UseI);
-    std::tuple<unsigned, unsigned, bool>
-    checkUseSubReg(const MachineOperand &CopySrc, const MachineOperand &MOUse);
-    bool hasImplicitOverlap(const MachineInstr &MI, const MachineOperand &Use);
-    void narrowRegClass(const MachineInstr &MI, const MachineOperand &MOUse,
-                        unsigned NewUseReg, unsigned NewUseSubReg);
-    void updateForwardedCopyLiveInterval(const MachineInstr &Copy,
-                                         const MachineInstr &UseMI,
-                                         unsigned OrigUseReg,
-                                         unsigned NewUseReg,
-                                         unsigned NewUseSubReg);
-    /// LiveRangeEdit callback for eliminateDeadDefs().
-    void LRE_WillEraseInstruction(MachineInstr *MI) override;
 
    /// Candidates for deletion.
    SmallSetVector<MachineInstr *, 8> MaybeDeadCopies;
    /// Def -> available copies map.
    Reg2MIMap AvailCopyMap;
    /// Def -> copies map.
    Reg2MIMap CopyMap;
    /// Src -> Def map
    SourceMap SrcMap;
    bool Changed;
  };
-
-  class MachineCopyPropagationPreRegRewrite : public MachineCopyPropagation {
-  public:
-    static char ID; // Pass identification, replacement for typeid
-    MachineCopyPropagationPreRegRewrite()
-        : MachineCopyPropagation(ID, true) {
-      initializeMachineCopyPropagationPreRegRewritePass(*PassRegistry::getPassRegistry());
-    }
-  };
 }
 
 char MachineCopyPropagation::ID = 0;
 char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
 
@@ -185,29 +82,6 @@ char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
 INITIALIZE_PASS(MachineCopyPropagation, DEBUG_TYPE,
                 "Machine Copy Propagation Pass", false, false)
 
-/// We have two separate passes that are very similar, the only difference being
-/// where they are meant to be run in the pipeline. This is done for several
-/// reasons:
-/// - the two passes have different dependencies
-/// - some targets want to disable the later run of this pass, but not the
-///   earlier one (e.g. NVPTX and WebAssembly)
-/// - it allows for easier debugging via llc
-
-char MachineCopyPropagationPreRegRewrite::ID = 0;
-char &llvm::MachineCopyPropagationPreRegRewriteID = MachineCopyPropagationPreRegRewrite::ID;
-
-INITIALIZE_PASS_BEGIN(MachineCopyPropagationPreRegRewrite,
-                      "machine-cp-prerewrite",
-                      "Machine Copy Propagation Pre-Register Rewrite Pass",
-                      false, false)
-INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
-INITIALIZE_PASS_END(MachineCopyPropagationPreRegRewrite,
-                    "machine-cp-prerewrite",
-                    "Machine Copy Propagation Pre-Register Rewrite Pass", false,
-                    false)
-
 /// Remove any entry in \p Map where the register is a subregister or equal to
 /// a register contained in \p Regs.
 static void removeRegsFromMap(Reg2MIMap &Map, const RegList &Regs,
@@ -248,10 +122,6 @@ void MachineCopyPropagation::ClobberRegister(unsigned Reg) {
 }
 
 void MachineCopyPropagation::ReadRegister(unsigned Reg) {
-  // We don't track MaybeDeadCopies when running pre-VirtRegRewriter.
-  if (PreRegRewrite)
-    return;
-
   // If 'Reg' is defined by a copy, the copy is no longer a candidate
   // for elimination.
   for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
@@ -283,46 +153,6 @@ static bool isNopCopy(const MachineInstr &PreviousCopy, unsigned Src,
   return SubIdx == TRI->getSubRegIndex(PreviousDef, Def);
 }
 
-/// Return the physical register assigned to \p Reg if it is a virtual register,
-/// otherwise just return the physical reg from the operand itself.
-///
-/// If \p SubReg is 0 then return the full physical register assigned to the
-/// virtual register ignoring subregs. If we aren't tracking sub-reg liveness
-/// then we need to use this to be more conservative with clobbers by killing
-/// all super reg and their sub reg COPYs as well.
-/// This is to prevent COPY forwarding in cases like the following:
-///
-///   %vreg2 = COPY %vreg1:sub1
-///   %vreg3 = COPY %vreg1:sub0
-///   ... = OP1 %vreg2
-///   ... = OP2 %vreg3
-///
-/// After forwarding %vreg2 (assuming this is the last use of %vreg1) and
-/// VirtRegRewriter adding kill markers we have:
-///
-///   %vreg3 = COPY %vreg1:sub0
-///   ... = OP1 %vreg1:sub1
-///   ... = OP2 %vreg3
-///
-/// If %vreg3 is assigned to a sub-reg of %vreg1, then after rewriting we have:
-///
-///   ... = OP1 R0:sub1, R0
-///   ... = OP2 R0:sub0
-///
-/// and the use of R0 by OP2 will not have a valid definition.
-unsigned MachineCopyPropagation::getPhysReg(unsigned Reg, unsigned SubReg) {
-
-  // Physical registers cannot have subregs.
-  if (!TargetRegisterInfo::isVirtualRegister(Reg))
-    return Reg;
-
-  assert(PreRegRewrite && "Unexpected virtual register encountered");
-  Reg = VRM->getPhys(Reg);
-  if (SubReg && !NoSubRegLiveness)
-    Reg = TRI->getSubReg(Reg, SubReg);
-  return Reg;
-}
-
 /// Remove instruction \p Copy if there exists a previous copy that copies the
 /// register \p Src to the register \p Def; This may happen indirectly by
 /// copying the super registers.
@@ -360,325 +190,6 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy, unsigned Src,
   return true;
 }
 
-
-/// Decide whether we should forward the destination of \param Copy to its use
-/// in \param UseI based on the register class of the Copy operands. Same-class
-/// COPYs are always accepted by this function, but cross-class COPYs are only
-/// accepted if they are forwarded to another COPY with the operand register
-/// classes reversed. For example:
-///
-///   RegClassA = COPY RegClassB  // Copy parameter
-///   ...
-///   RegClassB = COPY RegClassA  // UseI parameter
-///
-/// which after forwarding becomes
-///
-///   RegClassA = COPY RegClassB
-///   ...
-///   RegClassB = COPY RegClassB
-///
-/// so we have reduced the number of cross-class COPYs and potentially
-/// introduced a nop COPY that can be removed.
-bool MachineCopyPropagation::isForwardableRegClassCopy(
-    const MachineInstr &Copy, const MachineInstr &UseI) {
-  auto isCross = [&](const MachineOperand &Dst, const MachineOperand &Src) {
-    unsigned DstReg = Dst.getReg();
-    unsigned SrcPhysReg = getPhysReg(Src);
-    const TargetRegisterClass *DstRC;
-    if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
-      DstRC = MRI->getRegClass(DstReg);
-      unsigned DstSubReg = Dst.getSubReg();
-      if (DstSubReg)
-        SrcPhysReg = TRI->getMatchingSuperReg(SrcPhysReg, DstSubReg, DstRC);
-    } else
-      DstRC = TRI->getMinimalPhysRegClass(DstReg);
-
-    return !DstRC->contains(SrcPhysReg);
-  };
-
-  const MachineOperand &CopyDst = Copy.getOperand(0);
-  const MachineOperand &CopySrc = Copy.getOperand(1);
-
-  if (!isCross(CopyDst, CopySrc))
-    return true;
-
-  if (!UseI.isCopy())
-    return false;
-
-  assert(getFullPhysReg(UseI.getOperand(1)) == getFullPhysReg(CopyDst));
-  return !isCross(UseI.getOperand(0), CopySrc);
-}
-
-/// Check that the subregs on the copy source operand (\p CopySrc) and the use
-/// operand to be forwarded to (\p MOUse) are compatible with doing the
-/// forwarding. Also computes the new register and subregister to be used in
-/// the forwarded-to instruction.
-std::tuple<unsigned, unsigned, bool> MachineCopyPropagation::checkUseSubReg(
-    const MachineOperand &CopySrc, const MachineOperand &MOUse) {
-  unsigned NewUseReg = CopySrc.getReg();
-  unsigned NewUseSubReg;
-
-  if (TargetRegisterInfo::isPhysicalRegister(NewUseReg)) {
-    // If MOUse is a virtual reg, we need to apply it to the new physical reg
-    // we're going to replace it with.
-    if (MOUse.getSubReg())
-      NewUseReg = TRI->getSubReg(NewUseReg, MOUse.getSubReg());
-    // If the original use subreg isn't valid on the new src reg, we can't
-    // forward it here.
-    if (!NewUseReg)
-      return std::make_tuple(0, 0, false);
-    NewUseSubReg = 0;
-  } else {
-    // %v1 = COPY %v2:sub1
-    //   USE %v1:sub2
-    // The new use is %v2:sub1:sub2
-    NewUseSubReg =
-        TRI->composeSubRegIndices(CopySrc.getSubReg(), MOUse.getSubReg());
-    // Check that NewUseSubReg is valid on NewUseReg
-    if (NewUseSubReg &&
-        !TRI->getSubClassWithSubReg(MRI->getRegClass(NewUseReg), NewUseSubReg))
-      return std::make_tuple(0, 0, false);
-  }
-
-  return std::make_tuple(NewUseReg, NewUseSubReg, true);
-}
-
-/// Check that \p MI does not have implicit uses that overlap with its \p Use
-/// operand (the register being replaced), since these can sometimes be
-/// implicitly tied to other operands. For example, on AMDGPU:
-///
-/// V_MOVRELS_B32_e32 %VGPR2, %M0, %EXEC, %VGPR2_VGPR3_VGPR4_VGPR5
-///
-/// the %VGPR2 is implicitly tied to the larger reg operand, but we have no
-/// way of knowing we need to update the latter when updating the former.
-bool MachineCopyPropagation::hasImplicitOverlap(const MachineInstr &MI,
-                                                const MachineOperand &Use) {
-  if (!TargetRegisterInfo::isPhysicalRegister(Use.getReg()))
-    return false;
-
-  for (const MachineOperand &MIUse : MI.uses())
-    if (&MIUse != &Use && MIUse.isReg() && MIUse.isImplicit() &&
-        TRI->regsOverlap(Use.getReg(), MIUse.getReg()))
-      return true;
-
-  return false;
-}
-
-/// Narrow the register class of the forwarded vreg so it matches any
-/// instruction constraints. \p MI is the instruction being forwarded to. \p
-/// MOUse is the operand being replaced in \p MI (which hasn't yet been updated
-/// at the time this function is called). \p NewUseReg and \p NewUseSubReg are
-/// what the \p MOUse will be changed to after forwarding.
-///
-/// If we are forwarding
-///   A:RCA = COPY B:RCB
-/// into
-///   ... = OP A:RCA
-///
-/// then we need to narrow the register class of B so that it is a subclass
-/// of RCA so that it meets the instruction register class constraints.
-void MachineCopyPropagation::narrowRegClass(const MachineInstr &MI,
-                                            const MachineOperand &MOUse,
-                                            unsigned NewUseReg,
-                                            unsigned NewUseSubReg) {
-  if (!TargetRegisterInfo::isVirtualRegister(NewUseReg))
-    return;
-
-  // Make sure the virtual reg class allows the subreg.
-  if (NewUseSubReg) {
-    const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
-    const TargetRegisterClass *NewUseRC =
-        TRI->getSubClassWithSubReg(CurUseRC, NewUseSubReg);
-    if (CurUseRC != NewUseRC) {
-      DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
-                   << " to " << TRI->getRegClassName(NewUseRC) << "\n");
-      MRI->setRegClass(NewUseReg, NewUseRC);
-    }
-  }
-
-  unsigned MOUseOpNo = &MOUse - &MI.getOperand(0);
-  const TargetRegisterClass *InstRC =
-      TII->getRegClass(MI.getDesc(), MOUseOpNo, TRI, *MF);
-  if (InstRC) {
-    const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
-    if (NewUseSubReg)
-      InstRC = TRI->getMatchingSuperRegClass(CurUseRC, InstRC, NewUseSubReg);
-    if (!InstRC->hasSubClassEq(CurUseRC)) {
-      const TargetRegisterClass *NewUseRC =
-          TRI->getCommonSubClass(InstRC, CurUseRC);
-      DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
-                   << " to " << TRI->getRegClassName(NewUseRC) << "\n");
-      MRI->setRegClass(NewUseReg, NewUseRC);
-    }
-  }
-}
-
-/// Update the LiveInterval information to reflect the destination of \p Copy
-/// being forwarded to a use in \p UseMI. \p OrigUseReg is the register being
-/// forwarded through. It should be the destination register of \p Copy and has
-/// already been replaced in \p UseMI at the point this function is called. \p
-/// NewUseReg and \p NewUseSubReg are the register and subregister being
-/// forwarded. They should be the source register of the \p Copy and should be
-/// the value of the \p UseMI operand being forwarded at the point this function
-/// is called.
-void MachineCopyPropagation::updateForwardedCopyLiveInterval(
-    const MachineInstr &Copy, const MachineInstr &UseMI, unsigned OrigUseReg,
-    unsigned NewUseReg, unsigned NewUseSubReg) {
-
-  assert(TRI->isSubRegisterEq(getPhysReg(OrigUseReg, 0),
-                              getFullPhysReg(Copy.getOperand(0))) &&
-         "OrigUseReg mismatch");
-  assert(TRI->isSubRegisterEq(getFullPhysReg(Copy.getOperand(1)),
-                              getPhysReg(NewUseReg, 0)) &&
-         "NewUseReg mismatch");
-
-  // Extend live range starting from COPY early-clobber slot, since that
-  // is where the original src live range ends.
-  SlotIndex CopyUseIdx =
-      Indexes->getInstructionIndex(Copy).getRegSlot(true /*=EarlyClobber*/);
-  SlotIndex UseIdx = Indexes->getInstructionIndex(UseMI).getRegSlot();
-  if (TargetRegisterInfo::isVirtualRegister(NewUseReg)) {
-    LiveInterval &LI = LIS->getInterval(NewUseReg);
-    LI.extendInBlock(CopyUseIdx, UseIdx);
-    LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(NewUseSubReg);
-    for (auto &S : LI.subranges())
-      if ((S.LaneMask & UseMask).any() && S.find(CopyUseIdx))
-        S.extendInBlock(CopyUseIdx, UseIdx);
-  } else {
-    assert(NewUseSubReg == 0 && "Unexpected subreg on physical register!");
-    for (MCRegUnitIterator UI(NewUseReg, TRI); UI.isValid(); ++UI) {
-      LiveRange &LR = LIS->getRegUnit(*UI);
-      LR.extendInBlock(CopyUseIdx, UseIdx);
-    }
-  }
-
-  if (!TargetRegisterInfo::isVirtualRegister(OrigUseReg))
-    return;
-
-  LiveInterval &LI = LIS->getInterval(OrigUseReg);
-
-  // Can happen for undef uses.
-  if (LI.empty())
-    return;
-
-  SlotIndex UseIndex = Indexes->getInstructionIndex(UseMI);
-  const LiveRange::Segment *UseSeg = LI.getSegmentContaining(UseIndex);
-
-  // Only shrink if forwarded use is the end of a segment.
-  if (UseSeg->end != UseIndex.getRegSlot())
-    return;
-
-  SmallVector<MachineInstr *, 8> DeadInsts;
-  LIS->shrinkToUses(&LI, &DeadInsts);
-  if (!DeadInsts.empty()) {
-    SmallVector<unsigned, 8> NewRegs;
-    LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, nullptr, this)
-        .eliminateDeadDefs(DeadInsts);
-  }
-}
-
-void MachineCopyPropagation::LRE_WillEraseInstruction(MachineInstr *MI) {
-  // Remove this COPY from further consideration for forwarding.
-  ClobberRegister(getFullPhysReg(MI->getOperand(0)));
-  Changed = true;
-}
-
-/// Look for available copies whose destination register is used by \p MI and
-/// replace the use in \p MI with the copy's source register.
-void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
-  if (AvailCopyMap.empty())
-    return;
-
-  // Look for non-tied explicit vreg uses that have an active COPY
-  // instruction that defines the physical register allocated to them.
-  // Replace the vreg with the source of the active COPY.
-  for (MachineOperand &MOUse : MI.explicit_uses()) {
-    if (!MOUse.isReg() || MOUse.isTied())
-      continue;
-
-    unsigned UseReg = MOUse.getReg();
-    if (!UseReg)
-      continue;
-
-    if (TargetRegisterInfo::isVirtualRegister(UseReg))
-      UseReg = VRM->getPhys(UseReg);
-    else if (MI.isCall() || MI.isReturn() || MI.isInlineAsm() ||
-             MI.hasUnmodeledSideEffects() || MI.isDebugValue() || MI.isKill())
-      // Some instructions seem to have ABI uses e.g. not marked as
-      // implicit, which can lead to forwarding them when we shouldn't, so
-      // restrict the types of instructions we forward physical regs into.
-      continue;
-
-    // Don't forward COPYs via non-allocatable regs since they can have
-    // non-standard semantics.
-    if (!MRI->isAllocatable(UseReg))
-      continue;
-
-    auto CI = AvailCopyMap.find(UseReg);
-    if (CI == AvailCopyMap.end())
-      continue;
-
-    MachineInstr &Copy = *CI->second;
-    MachineOperand &CopyDst = Copy.getOperand(0);
-    MachineOperand &CopySrc = Copy.getOperand(1);
-
-    // Don't forward COPYs that are already NOPs due to register assignment.
-    if (getPhysReg(CopyDst) == getPhysReg(CopySrc))
-      continue;
-
-    // FIXME: Don't handle partial uses of wider COPYs yet.
-    if (CopyDst.getSubReg() != 0 || UseReg != getPhysReg(CopyDst))
-      continue;
-
-    // Don't forward COPYs of non-allocatable regs unless they are constant.
-    unsigned CopySrcReg = CopySrc.getReg();
-    if (TargetRegisterInfo::isPhysicalRegister(CopySrcReg) &&
-        !MRI->isAllocatable(CopySrcReg) && !MRI->isConstantPhysReg(CopySrcReg))
-      continue;
-
-    if (!isForwardableRegClassCopy(Copy, MI))
-      continue;
-
-    unsigned NewUseReg, NewUseSubReg;
-    bool SubRegOK;
-    std::tie(NewUseReg, NewUseSubReg, SubRegOK) =
-        checkUseSubReg(CopySrc, MOUse);
-    if (!SubRegOK)
-      continue;
-
-    if (hasImplicitOverlap(MI, MOUse))
-      continue;
-
-    DEBUG(dbgs() << "MCP: Replacing "
-                 << PrintReg(MOUse.getReg(), TRI, MOUse.getSubReg())
-                 << "\n with "
-                 << PrintReg(NewUseReg, TRI, CopySrc.getSubReg())
-                 << "\n in "
-                 << MI
-                 << " from "
-                 << Copy);
-
-    narrowRegClass(MI, MOUse, NewUseReg, NewUseSubReg);
-
-    unsigned OrigUseReg = MOUse.getReg();
-    MOUse.setReg(NewUseReg);
-    MOUse.setSubReg(NewUseSubReg);
-
-    DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");
-
-    if (PreRegRewrite)
-      updateForwardedCopyLiveInterval(Copy, MI, OrigUseReg, NewUseReg,
-                                      NewUseSubReg);
-    else
-      for (MachineInstr &KMI :
-           make_range(Copy.getIterator(), std::next(MI.getIterator())))
-        KMI.clearRegisterKills(NewUseReg, TRI);
-
-    ++NumCopyForwards;
-    Changed = true;
-  }
-}
-
 void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
   DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
 
@@ -687,8 +198,12 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
     ++I;
 
     if (MI->isCopy()) {
-      unsigned Def = getPhysReg(MI->getOperand(0));
-      unsigned Src = getPhysReg(MI->getOperand(1));
+      unsigned Def = MI->getOperand(0).getReg();
+      unsigned Src = MI->getOperand(1).getReg();
+
+      assert(!TargetRegisterInfo::isVirtualRegister(Def) &&
+             !TargetRegisterInfo::isVirtualRegister(Src) &&
+             "MachineCopyPropagation should be run after register allocation!");
 
       // The two copies cancel out and the source of the first copy
       // hasn't been overridden, eliminate the second one. e.g.
@@ -705,16 +220,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
      //  %ECX = COPY %EAX
      // =>
      //  %ECX = COPY %EAX
-      if (!PreRegRewrite)
-        if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
-          continue;
-
-      forwardUses(*MI);
-
-      // Src may have been changed by forwardUses()
-      Src = getPhysReg(MI->getOperand(1));
-      unsigned DefClobber = getFullPhysReg(MI->getOperand(0));
-      unsigned SrcClobber = getFullPhysReg(MI->getOperand(1));
+      if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
+        continue;
 
      // If Src is defined by a previous copy, the previous copy cannot be
      // eliminated.
@@ -731,10 +238,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
      DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
 
      // Copy is now a candidate for deletion.
-      // Only look for dead COPYs if we're not running just before
-      // VirtRegRewriter, since presumably these COPYs will have already been
-      // removed.
-      if (!PreRegRewrite && !MRI->isReserved(Def))
+      if (!MRI->isReserved(Def))
        MaybeDeadCopies.insert(MI);
 
      // If 'Def' is previously source of another copy, then this earlier copy's
      // source is no longer available. e.g.
      // %xmm9 = copy %xmm2
      // ...
      // %xmm2 = copy %xmm0
      // ...
      // %xmm2 = copy %xmm9
-      ClobberRegister(DefClobber);
+      ClobberRegister(Def);
      for (const MachineOperand &MO : MI->implicit_operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
-        unsigned Reg = getFullPhysReg(MO);
+        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        ClobberRegister(Reg);
@@ -763,27 +267,13 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
 
      // Remember source that's copied to Def. Once it's clobbered, then
      // it's no longer available for copy propagation.
-      RegList &DestList = SrcMap[SrcClobber];
-      if (!is_contained(DestList, DefClobber))
-        DestList.push_back(DefClobber);
+      RegList &DestList = SrcMap[Src];
+      if (!is_contained(DestList, Def))
+        DestList.push_back(Def);
 
      continue;
    }
 
-    // Clobber any earlyclobber regs first.
-    for (const MachineOperand &MO : MI->operands())
-      if (MO.isReg() && MO.isEarlyClobber()) {
-        unsigned Reg = getFullPhysReg(MO);
-        // If we have a tied earlyclobber, that means it is also read by this
-        // instruction, so we need to make sure we don't remove it as dead
-        // later.
-        if (MO.isTied())
-          ReadRegister(Reg);
-        ClobberRegister(Reg);
-      }
-
-    forwardUses(*MI);
-
    // Not a copy.
    SmallVector<unsigned, 2> Defs;
    const MachineOperand *RegMask = nullptr;
@@ -792,11 +282,14 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
        RegMask = &MO;
      if (!MO.isReg())
        continue;
-      unsigned Reg = getFullPhysReg(MO);
+      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
 
-      if (MO.isDef() && !MO.isEarlyClobber()) {
+      assert(!TargetRegisterInfo::isVirtualRegister(Reg) &&
+             "MachineCopyPropagation should be run after register allocation!");
+
+      if (MO.isDef()) {
        Defs.push_back(Reg);
        continue;
      } else if (MO.readsReg())
@@ -853,8 +346,6 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
  // since we don't want to trust live-in lists.
  if (MBB.succ_empty()) {
    for (MachineInstr *MaybeDead : MaybeDeadCopies) {
-      DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
-            MaybeDead->dump());
      assert(!MRI->isReserved(MaybeDead->getOperand(0).getReg()));
      MaybeDead->eraseFromParent();
      Changed = true;
@@ -877,16 +368,10 @@ bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
  TRI = MF.getSubtarget().getRegisterInfo();
  TII = MF.getSubtarget().getInstrInfo();
  MRI = &MF.getRegInfo();
-  this->MF = &MF;
-  if (PreRegRewrite) {
-    Indexes = &getAnalysis<SlotIndexes>();
-    LIS = &getAnalysis<LiveIntervals>();
-    VRM = &getAnalysis<VirtRegMap>();
-  }
-  NoSubRegLiveness = !MRI->subRegLivenessEnabled();
 
  for (MachineBasicBlock &MBB : MF)
    CopyPropagateBlock(MBB);
 
  return Changed;
 }
+
diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp
index 7860a932c2b3..481baea2dff0 100644
--- a/llvm/lib/CodeGen/TargetPassConfig.cpp
+++ b/llvm/lib/CodeGen/TargetPassConfig.cpp
@@ -88,8 +88,6 @@ static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
    cl::desc("Disable Codegen Prepare"));
 static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
    cl::desc("Disable Copy Propagation pass"));
-static cl::opt<bool> DisableCopyPropPreRegRewrite("disable-copyprop-prerewrite", cl::Hidden,
-    cl::desc("Disable Copy Propagation Pre-Register Re-write pass"));
 static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
    cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
 static cl::opt<bool> EnableImplicitNullChecks(
@@ -250,9 +248,6 @@ static IdentifyingPassPtr overridePass(AnalysisID StandardID,
  if (StandardID == &MachineCopyPropagationID)
    return applyDisable(TargetID, DisableCopyProp);
 
-  if (StandardID == &MachineCopyPropagationPreRegRewriteID)
-    return applyDisable(TargetID, DisableCopyPropPreRegRewrite);
-
  return TargetID;
 }
 
@@ -1064,10 +1059,6 @@ void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  // Allow targets to change the register assignments before rewriting.
  addPreRewrite();
 
-  // Copy propagate to forward register uses and try to eliminate COPYs that
-  // were not coalesced.
-  addPass(&MachineCopyPropagationPreRegRewriteID);
-
  // Finally rewrite virtual registers.
  addPass(&VirtRegRewriterID);
diff --git a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
index 55ddaf8b65f1..0dfe04b664d0 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
@@ -9,8 +9,7 @@ define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind {
 ; CHECK-LABEL: halfword:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: ldrh [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #1]
-; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
-; CHECK: strh [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #1]
+; CHECK: strh [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #1]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
@@ -25,8 +24,7 @@ define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind {
 ; CHECK-LABEL: word:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: ldr [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #2]
-; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
-; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #2]
+; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #2]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
@@ -41,8 +39,7 @@ define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
 ; CHECK-LABEL: doubleword:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: ldr [[REG1:x[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #3]
-; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
-; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #3]
+; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #3]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
diff --git a/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll b/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
index 0277f4c92b05..649bc25b7265 100644
--- a/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
@@ -8,9 +8,15 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK: add.2d v[[REG:[0-9]+]], v0, v1
 ; CHECK: add d[[REG3:[0-9]+]], d[[REG]], d1
 ; CHECK: sub d[[REG2:[0-9]+]], d[[REG]], d1
-; CHECK-NOT: fmov
+; Without advanced copy optimization, we end up with cross register
+; banks copies that cannot be coalesced.
+; CHECK-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
+; With advanced copy optimization, we end up with just one copy
+; to insert the computed high part into the V register.
+; CHECK-OPT-NOT: fmov
 ; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; CHECK-NOT: fmov
+; CHECK-NOOPT: fmov d0, [[COPY_REG3]]
+; CHECK-OPT-NOT: fmov
 ; CHECK: ins.d v0[1], [[COPY_REG2]]
 ; CHECK-NEXT: ret
 ;
@@ -18,9 +24,11 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; GENERIC: add v[[REG:[0-9]+]].2d, v0.2d, v1.2d
 ; GENERIC: add d[[REG3:[0-9]+]], d[[REG]], d1
 ; GENERIC: sub d[[REG2:[0-9]+]], d[[REG]], d1
-; GENERIC-NOT: fmov
+; GENERIC-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
+; GENERIC-OPT-NOT: fmov
 ; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; GENERIC-NOT: fmov
+; GENERIC-NOOPT: fmov d0, [[COPY_REG3]]
+; GENERIC-OPT-NOT: fmov
 ; GENERIC: ins v0.d[1], [[COPY_REG2]]
 ; GENERIC-NEXT: ret
   %add = add <2 x i64> %a, %b
diff --git a/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll b/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll
index 60a62030e44b..c56d607aa812 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zero-cycle-regmov.ll
@@ -4,10 +4,8 @@ define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp {
 entry:
 ; CHECK-LABEL: t:
-; CHECK: mov [[REG2:x[0-9]+]], x3
-; CHECK: mov [[REG1:x[0-9]+]], x2
-; CHECK: mov x0, x2
-; CHECK: mov x1, x3
+; CHECK: mov x0, [[REG1:x[0-9]+]]
+; CHECK: mov x1, [[REG2:x[0-9]+]]
 ; CHECK: bl _foo
 ; CHECK: mov x0, [[REG1]]
 ; CHECK: mov x1, [[REG2]]
diff --git a/llvm/test/CodeGen/AArch64/f16-instructions.ll b/llvm/test/CodeGen/AArch64/f16-instructions.ll
index 35c611b0812e..613c71a558bd 100644
--- a/llvm/test/CodeGen/AArch64/f16-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/f16-instructions.ll
@@ -350,7 +350,7 @@ else:
 
 ; CHECK-LABEL: test_phi:
 ; CHECK: mov x[[PTR:[0-9]+]], x0
-; CHECK: ldr h[[AB:[0-9]+]], [x0]
+; CHECK: ldr h[[AB:[0-9]+]], [x[[PTR]]]
 ; CHECK: [[LOOP:LBB[0-9_]+]]:
 ; CHECK: mov.16b v[[R:[0-9]+]], v[[AB]]
 ; CHECK: ldr h[[AB]], [x[[PTR]]]
diff --git a/llvm/test/CodeGen/AArch64/flags-multiuse.ll b/llvm/test/CodeGen/AArch64/flags-multiuse.ll
index a13f7e1e34ac..0827fb8c9e8c 100644
--- a/llvm/test/CodeGen/AArch64/flags-multiuse.ll
+++ b/llvm/test/CodeGen/AArch64/flags-multiuse.ll
@@ -17,9 +17,6 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
 
   %val = zext i1 %test to i32
 ; CHECK: cset {{[xw][0-9]+}}, ne
-; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS]]
-; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS]]
-
   store i32 %val, i32* @var
 
   call void @bar()
@@ -28,7 +25,7 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
 
 ; Currently, the comparison is emitted again. An MSR/MRS pair would also be
 ; acceptable, but assuming the call preserves NZCV is not.
   br i1 %test, label %iftrue, label %iffalse
-; CHECK: cmp [[LHSCOPY]], [[RHSCOPY]]
+; CHECK: cmp [[LHS]], [[RHS]]
 ; CHECK: b.eq
 
 iftrue:
diff --git a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
index c33248f56f4c..4f2af9ed7e65 100644
--- a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
+++ b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
@@ -8,9 +8,10 @@
 define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg) {
 ;CHECK-LABEL: test
 entry:
+; A53: mov [[DATA:w[0-9]+]], w1
 ; A53: str q{{[0-9]+}}, {{.*}}
 ; A53: str q{{[0-9]+}}, {{.*}}
-; A53: str w1, {{.*}}
+; A53: str [[DATA]], {{.*}}
   %0 = bitcast %struct1* %fde to i8*
   tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 8, i1 false)
diff --git a/llvm/test/CodeGen/AArch64/neg-imm.ll b/llvm/test/CodeGen/AArch64/neg-imm.ll
index ee95f37c203c..46bded78cc59 100644
--- a/llvm/test/CodeGen/AArch64/neg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/neg-imm.ll
@@ -7,8 +7,8 @@ declare void @foo(i32)
 define void @test(i32 %px) {
 ; CHECK_LABEL: test:
 ; CHECK_LABEL: %entry
-; CHECK: subs [[REG0:w[0-9]+]],
-; CHECK: csel {{w[0-9]+}}, wzr, [[REG0]]
+; CHECK: subs
+; CHECK-NEXT: csel
 entry:
   %sub = add nsw i32 %px, -1
   %cmp = icmp slt i32 %px, 1
diff --git a/llvm/test/CodeGen/AMDGPU/byval-frame-setup.ll b/llvm/test/CodeGen/AMDGPU/byval-frame-setup.ll
index f7b9b11d37bc..723c78ad647a 100644
--- a/llvm/test/CodeGen/AMDGPU/byval-frame-setup.ll
+++ b/llvm/test/CodeGen/AMDGPU/byval-frame-setup.ll
@@ -127,21 +127,20 @@ entry:
 }
 
 ; GCN-LABEL: {{^}}call_void_func_byval_struct_kernel:
-; GCN: s_add_u32 s32, s7, 0xa00{{$}}
+; GCN: s_mov_b32 s33, s7
+; GCN: s_add_u32 s32, s33, 0xa00{{$}}
 
 ; GCN-DAG: v_mov_b32_e32 [[NINE:v[0-9]+]], 9
 ; GCN-DAG: v_mov_b32_e32 [[THIRTEEN:v[0-9]+]], 13
-; GCN-DAG: buffer_store_dword [[NINE]], off, s[0:3], s7 offset:8
-; GCN: buffer_store_dword [[THIRTEEN]], off, s[0:3], s7 offset:24
-
-; GCN: s_mov_b32 s33, s7
+; GCN-DAG: buffer_store_dword [[NINE]], off, s[0:3], s33 offset:8
+; GCN: buffer_store_dword [[THIRTEEN]], off, s[0:3], s33 offset:24
 
 ; GCN-DAG: s_add_u32 s32, s32, 0x800{{$}}
 
-; GCN-DAG: buffer_load_dword [[LOAD0:v[0-9]+]], off, s[0:3], s{{7|33}} offset:8
-; GCN-DAG: buffer_load_dword [[LOAD1:v[0-9]+]], off, s[0:3], s{{7|33}} offset:12
-; GCN-DAG: buffer_load_dword [[LOAD2:v[0-9]+]], off, s[0:3], s{{7|33}} offset:16
-; GCN-DAG: buffer_load_dword [[LOAD3:v[0-9]+]], off, s[0:3], s{{7|33}} offset:20
+; GCN-DAG: buffer_load_dword [[LOAD0:v[0-9]+]], off, s[0:3], s33 offset:8
+; GCN-DAG: buffer_load_dword [[LOAD1:v[0-9]+]], off, s[0:3], s33 offset:12
+; GCN-DAG: buffer_load_dword [[LOAD2:v[0-9]+]], off, s[0:3], s33 offset:16
+; GCN-DAG: buffer_load_dword [[LOAD3:v[0-9]+]], off, s[0:3], s33 offset:20
 
 ; GCN-DAG: buffer_store_dword [[LOAD0]], off, s[0:3], s32 offset:4{{$}}
 ; GCN-DAG: buffer_store_dword [[LOAD1]], off, s[0:3], s32 offset:8
diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
index e47b0a2a2a34..589b333e608b 100644
--- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
@@ -65,17 +65,17 @@ define amdgpu_kernel void @test_call_external_void_func_i1_imm() #0 {
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_i1_signext:
 ; MESA: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
 
 ; HSA: s_mov_b32 s33, s9{{$}}
-; HSA: s_mov_b32 s32, s9{{$}}
 
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i1_signext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i1_signext@rel32@hi+4
 ; GCN-NEXT: buffer_load_ubyte [[VAR:v[0-9]+]]
 ; HSA-NEXT: s_mov_b32 s4, s33
+; HSA-NEXT: s_mov_b32 s32, s33
 ; MESA-DAG: s_mov_b32 s4, s33{{$}}
+; MESA-DAG: s_mov_b32 s32, s33{{$}}
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_bfe_i32 v0, v0, 0, 1
@@ -90,8 +90,6 @@ define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
 ; FIXME: load should be scheduled before getpc
 ; GCN-LABEL: {{^}}test_call_external_void_func_i1_zeroext:
 ; MESA: s_mov_b32 s33, s3{{$}}
-; MESA: s_mov_b32 s32, s3{{$}}
-; HSA: s_mov_b32 s32, s9{{$}}
 
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i1_zeroext@rel32@lo+4
@@ -99,6 +97,7 @@ define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
 ; GCN-NEXT: buffer_load_ubyte v0
 
 ; GCN-DAG: s_mov_b32 s4, s33{{$}}
+; GCN-DAG: s_mov_b32 s32, s33{{$}}
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_and_b32_e32 v0, 1, v0
@@ -112,15 +111,14 @@ define amdgpu_kernel void @test_call_external_void_func_i1_zeroext(i32) #0 {
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_i8_imm:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA: s_mov_b32 s32, s3{{$}}
 
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i8@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i8@rel32@hi+4
 ; GCN-NEXT: v_mov_b32_e32 v0, 0x7b
 
-; HSA-DAG: s_mov_b32 s4, s9{{$}}
-; HSA-DAG: s_mov_b32 s32, s9{{$}}
+; HSA-DAG: s_mov_b32 s4, s33{{$}}
+; GCN-DAG: s_mov_b32 s32, s33{{$}}
 
 ; GCN: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
 ; GCN-NEXT: s_endpgm
@@ -131,17 +129,16 @@ define amdgpu_kernel void @test_call_external_void_func_i8_imm(i32) #0 {
 
 ; FIXME: don't wait before call
 ; GCN-LABEL: {{^}}test_call_external_void_func_i8_signext:
+; HSA-DAG: s_mov_b32 s33, s9{{$}}
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
 
 ; GCN-DAG: buffer_load_sbyte v0
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i8_signext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i8_signext@rel32@hi+4
 
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9{{$}}
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s3
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
@@ -154,16 +151,15 @@ define amdgpu_kernel void @test_call_external_void_func_i8_signext(i32) #0 {
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_i8_zeroext:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
+; HSA-DAG: s_mov_b32 s33, s9{{$}}
 
 ; GCN-DAG: buffer_load_ubyte v0
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i8_zeroext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i8_zeroext@rel32@hi+4
 
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
@@ -178,8 +174,7 @@ define amdgpu_kernel void @test_call_external_void_func_i8_zeroext(i32) #0 {
 
 ; GCN-DAG: v_mov_b32_e32 v0, 0x7b{{$}}
; GCN-DAG: s_mov_b32 s4, s33
-; MESA-DAG: s_mov_b32 s32, s1
-; HSA-DAG: s_mov_b32 s32, s7
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @test_call_external_void_func_i16_imm() #0 {
@@ -189,16 +184,14 @@ define amdgpu_kernel void @test_call_external_void_func_i16_imm() #0 {
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_i16_signext:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
 
 ; GCN-DAG: buffer_load_sshort v0
 
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i16_signext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i16_signext@rel32@hi+4
 
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
@@ -211,7 +204,6 @@ define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_i16_zeroext:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
 
 ; GCN-DAG: buffer_load_ushort v0
 
@@ -219,9 +211,8 @@ define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i16_zeroext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i16_zeroext@rel32@hi+4
 
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
@@ -234,15 +225,13 @@ define amdgpu_kernel void @test_call_external_void_func_i16_zeroext(i32) #0 {
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_i32_imm:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
 
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i32@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i32@rel32@hi+4
 ; GCN: v_mov_b32_e32 v0, 42
 
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
 ; GCN-NEXT: s_endpgm
@@ -395,10 +384,11 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
 }
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_v32i32_i32:
-; HSA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s9, 0x100{{$}}
+; HSA-DAG: s_mov_b32 s33, s9
+; HSA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s33, 0x100{{$}}
 
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s3, 0x100{{$}}
+; MESA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s33, 0x100{{$}}
 
 ; GCN-DAG: buffer_load_dword [[VAL1:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 ; GCN-DAG: buffer_load_dwordx4 v[0:3], off
@@ -447,29 +437,27 @@ define amdgpu_kernel void @test_call_external_void_func_struct_i8_i32() #0 {
 }
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_byval_struct_i8_i32:
-; MESA-DAG: s_add_u32 [[SP:s[0-9]+]], s1, 0x400{{$}}
-; HSA-DAG: s_add_u32 [[SP:s[0-9]+]], s7, 0x400{{$}}
+; GCN-DAG: s_add_u32 [[SP:s[0-9]+]], s33, 0x400{{$}}
 
 ; GCN-DAG: v_mov_b32_e32 [[VAL0:v[0-9]+]], 3
 ; GCN-DAG: v_mov_b32_e32 [[VAL1:v[0-9]+]], 8
-; MESA-DAG: buffer_store_byte [[VAL0]], off, s[36:39], s1 offset:8
-; MESA-DAG: buffer_store_dword [[VAL1]], off, s[36:39], s1 offset:12
+; MESA-DAG: buffer_store_byte [[VAL0]], off, s[36:39], s33 offset:8
+; MESA-DAG: buffer_store_dword [[VAL1]], off, s[36:39], s33 offset:12
 
-; HSA-DAG: s_mov_b32 s33, s7
-; HSA-DAG: buffer_store_byte [[VAL0]], off, s[0:3], s{{7|33}} offset:8
-; HSA-DAG: buffer_store_dword [[VAL1]], off, s[0:3], s{{7|33}} offset:12
+; HSA-DAG: buffer_store_byte [[VAL0]], off, s[0:3], s33 offset:8
+; HSA-DAG: buffer_store_dword [[VAL1]], off, s[0:3], s33 offset:12
 
 ; GCN: s_add_u32 [[SP]], [[SP]], 0x200
 
-; HSA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[0:3], s{{7|33}} offset:8
-; HSA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[0:3], s{{7|33}} offset:12
+; HSA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[0:3], s33 offset:8
+; HSA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[0:3], s33 offset:12
 
 ; HSA: buffer_store_dword [[RELOAD_VAL1]], off, s[0:3], [[SP]] offset:8
 ; HSA: buffer_store_dword [[RELOAD_VAL0]], off, s[0:3], [[SP]] offset:4
 
-; MESA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[36:39], s1 offset:8
-; MESA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[36:39], s1 offset:12
+; MESA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[36:39], s33 offset:8
+; MESA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[36:39], s33 offset:12
 
 ; MESA: buffer_store_dword [[RELOAD_VAL1]], off, s[36:39], [[SP]] offset:8
 ; MESA: buffer_store_dword [[RELOAD_VAL0]], off, s[36:39], [[SP]] offset:4
@@ -502,8 +490,8 @@ define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0
 ; GCN: buffer_store_dword [[RELOAD_VAL1]], off, s{{\[[0-9]+:[0-9]+\]}}, [[SP]] offset:8
 ; GCN: buffer_store_dword [[RELOAD_VAL0]], off, s{{\[[0-9]+:[0-9]+\]}}, [[SP]] offset:4
 ; GCN-NEXT: s_swappc_b64
-; GCN-DAG: buffer_load_ubyte [[LOAD_OUT_VAL0:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s33 offset:16
-; GCN-DAG: buffer_load_dword [[LOAD_OUT_VAL1:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s33 offset:20
+; GCN-DAG: buffer_load_ubyte [[LOAD_OUT_VAL0:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, [[FP_REG]] offset:16
+; GCN-DAG: buffer_load_dword [[LOAD_OUT_VAL1:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, [[FP_REG]] offset:20
 ; GCN: s_sub_u32 [[SP]], [[SP]], 0x200
 
 ; GCN: buffer_store_byte [[LOAD_OUT_VAL0]], off
diff --git a/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll b/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
index d8b572cbf020..18122613d43f 100644
--- a/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
@@ -5,12 +5,12 @@ declare void @external_void_func_void() #0
 
 ; GCN-LABEL: {{^}}test_kernel_call_external_void_func_void_clobber_s30_s31_call_external_void_func_void:
+; GCN: s_mov_b32 s33, s7
 ; GCN: s_getpc_b64 s[34:35]
 ; GCN-NEXT: s_add_u32 s34, s34,
 ; GCN-NEXT: s_addc_u32 s35, s35,
-; GCN-NEXT: s_mov_b32 s4, s7
-; GCN-NEXT: s_mov_b32 s33, s7
-; GCN-NEXT: s_mov_b32 s32, s7
+; GCN-NEXT: s_mov_b32 s4, s33
+; GCN-NEXT: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64 s[30:31], s[34:35]
 ; GCN-NEXT: s_mov_b32 s4, s33
@@ -112,13 +112,14 @@ define amdgpu_kernel void @test_call_void_func_void_mayclobber_v31(i32 addrspace
 }
 
 ; GCN-LABEL: {{^}}test_call_void_func_void_preserves_s33:
+; GCN: s_mov_b32 s34, s9
 ; GCN: ; def s33
 ; GCN-NEXT: #ASMEND
 ; GCN: s_getpc_b64 s[6:7]
 ; GCN-NEXT: s_add_u32 s6, s6, external_void_func_void@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s7, s7, external_void_func_void@rel32@hi+4
-; GCN-NEXT: s_mov_b32 s4, s9
-; GCN-NEXT: s_mov_b32 s32, s9
+; GCN-NEXT: s_mov_b32 s4, s34
+; GCN-NEXT: s_mov_b32 s32, s34
 ; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: ; use s33
@@ -132,13 +133,14 @@ define amdgpu_kernel void @test_call_void_func_void_preserves_s33(i32 addrspace
 }
 
 ; GCN-LABEL: {{^}}test_call_void_func_void_preserves_v32:
+; GCN: s_mov_b32 s33, s9
 ; GCN: ; def v32
 ; GCN-NEXT: #ASMEND
 ; GCN: s_getpc_b64 s[6:7]
 ; GCN-NEXT: s_add_u32 s6, s6, external_void_func_void@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s7, s7, external_void_func_void@rel32@hi+4
-; GCN-NEXT: s_mov_b32 s4, s9
-; GCN-NEXT: s_mov_b32 s32, s9
+; GCN-NEXT: s_mov_b32 s4, s33
+; GCN-NEXT: s_mov_b32 s32, s33
 ; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: ; use v32
@@ -165,11 +167,11 @@ define void @void_func_void_clobber_s33() #2 {
 
 ; GCN-LABEL: {{^}}test_call_void_func_void_clobber_s33:
 ; GCN: s_mov_b32 s33, s7
-; GCN: s_mov_b32 s32, s7
 ; GCN: s_getpc_b64
 ; GCN-NEXT: s_add_u32
 ; GCN-NEXT: s_addc_u32
 ; GCN-NEXT: s_mov_b32 s4, s33
+; GCN-NEXT: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @test_call_void_func_void_clobber_s33() #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs.ll b/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs.ll
index 2dc1fa2dd9fa..1518c0e503eb 100644
--- a/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs.ll
+++ b/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs.ll
@@ -191,9 +191,11 @@ define void @use_workgroup_id_yz() #1 {
 ; GCN: enable_sgpr_workgroup_id_z = 0
 
 ; GCN-NOT: s6
-; GCN: s_mov_b32 s4, s7
+; GCN: s_mov_b32 s33, s7
 ; GCN-NOT: s6
-; GCN: s_mov_b32 s32, s7
+; GCN: s_mov_b32 s4, s33
+; GCN-NOT: s6
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_use_workgroup_id_x() #1 {
   call void @use_workgroup_id_x()
@@ -206,9 +208,9 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_x() #1 {
 ; GCN: enable_sgpr_workgroup_id_z = 0
 
 ; GCN: s_mov_b32 s33, s8
-; GCN: s_mov_b32 s32, s8
 ; GCN: s_mov_b32 s4, s33
 ; GCN: s_mov_b32 s6, s7
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_use_workgroup_id_y() #1 {
   call void @use_workgroup_id_y()
@@ -237,10 +239,10 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_z() #1 {
 ; GCN: s_mov_b32 s33, s8
 ; GCN-NOT: s6
 ; GCN-NOT: s7
-; GCN: s_mov_b32 s32, s8
+; GCN: s_mov_b32 s4, s33
 ; GCN-NOT: s6
 ; GCN-NOT: s7
-; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s32, s33
 ; GCN-NOT: s6
 ; GCN-NOT: s7
 ; GCN: s_swappc_b64
@@ -254,17 +256,19 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_xy() #1 {
 ; GCN: enable_sgpr_workgroup_id_y = 1
 ; GCN: enable_sgpr_workgroup_id_z = 1
 
-; GCN-NOT: s6
-; GCN-NOT: s7
-; GCN-NOT: s8
-
-; GCN: s_mov_b32 s4, s9
+; GCN: s_mov_b32 s33, s9
 
 ; GCN-NOT: s6
 ; GCN-NOT: s7
 ; GCN-NOT: s8
 
-; GCN: s_mov_b32 s32, s9
+; GCN: s_mov_b32 s4, s33
+
+; GCN-NOT: s6
+; GCN-NOT: s7
+; GCN-NOT: s8
+
+; GCN: s_mov_b32 s32, s33
 
 ; GCN-NOT: s6
 ; GCN-NOT: s7
@@ -285,11 +289,11 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_xyz() #1 {
 
 ; GCN-NOT: s6
 ; GCN-NOT: s7
-; GCN: s_mov_b32 s32, s8
+; GCN: s_mov_b32 s4, s33
 ; GCN-NOT: s6
 ; GCN-NOT: s7
-; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s32, s33
 ; GCN-NOT: s6
 ; GCN-NOT: s7
 
 ; GCN: s_swappc_b64
@@ -304,10 +308,11 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_xz() #1 {
 ; GCN: enable_sgpr_workgroup_id_y = 1
 ; GCN: enable_sgpr_workgroup_id_z = 1
 
+; GCN: s_mov_b32 s33, s9
 ; GCN: s_mov_b32 s6, s7
-; GCN: s_mov_b32 s4, s9
+; GCN: s_mov_b32 s4, s33
 ; GCN: s_mov_b32 s7, s8
-; GCN: s_mov_b32 s32, s9
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_use_workgroup_id_yz() #1 {
   call void @use_workgroup_id_yz()
@@ -371,12 +376,13 @@ define void @other_arg_use_workgroup_id_z(i32 %arg0) #1 {
 ; GCN: enable_sgpr_workgroup_id_y = 0
 ; GCN: enable_sgpr_workgroup_id_z = 0
 
+; GCN-DAG: s_mov_b32 s33, s7
 ; GCN-DAG: v_mov_b32_e32 v0, 0x22b
 
 ; GCN-NOT: s6
-; GCN: s_mov_b32 s4, s7
+; GCN: s_mov_b32 s4, s33
 ; GCN-NOT: s6
-; GCN-DAG: s_mov_b32 s32, s7
+; GCN-DAG: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_x() #1 {
   call void @other_arg_use_workgroup_id_x(i32 555)
@@ -389,10 +395,10 @@ define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_x() #1 {
 ; GCN: enable_sgpr_workgroup_id_z = 0
 
 ; GCN-DAG: s_mov_b32 s33, s8
-; GCN-DAG: s_mov_b32 s32, s8
 ; GCN-DAG: v_mov_b32_e32 v0, 0x22b
 ; GCN: s_mov_b32 s4, s33
 ; GCN-DAG: s_mov_b32 s6, s7
+; GCN-DAG: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_y() #1 {
   call void @other_arg_use_workgroup_id_y(i32 555)
@@ -405,11 +411,11 @@ define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_y() #1 {
 ; GCN: enable_sgpr_workgroup_id_z = 1
 
 ; GCN: s_mov_b32 s33, s8
-; GCN: s_mov_b32 s32, s8
 ; GCN-DAG: v_mov_b32_e32 v0, 0x22b
 ; GCN: s_mov_b32 s4, s33
 ; GCN-DAG: s_mov_b32 s6, s7
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_z() #1 {
   call void @other_arg_use_workgroup_id_z(i32 555)
@@ -469,12 +475,13 @@ define void @use_every_sgpr_input() #1 {
 ; GCN: enable_sgpr_dispatch_id = 1
 ; GCN: enable_sgpr_flat_scratch_init = 1
 
+; GCN: s_mov_b32 s33, s17
 ; GCN: s_mov_b64 s[12:13], s[10:11]
 ; GCN: s_mov_b64 s[10:11], s[8:9]
 ; GCN: s_mov_b64 s[8:9], s[6:7]
 ; GCN: s_mov_b64 s[6:7], s[4:5]
-; GCN: s_mov_b32 s4, s17
-; GCN: s_mov_b32 s32, s17
+; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_use_every_sgpr_input() #1 {
   call void @use_every_sgpr_input()
@@ -540,18 +547,16 @@ define void @func_use_every_sgpr_input_call_use_workgroup_id_xyz() #1 {
 ; GCN: s_mov_b32 s5, s32
 ; GCN: s_add_u32 s32, s32, 0x300
 
-; GCN: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
-; GCN: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]
-; GCN: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
-
-; GCN: s_mov_b32 s6, s14
-; GCN: s_mov_b32 s7, s15
-; GCN: s_mov_b32 s8, s16
-
-; GCN: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
-; GCN: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
-; GCN: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
+; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
+; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
+; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
+; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
+; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
+; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]
+; GCN-DAG: s_mov_b32 s6, [[SAVE_X]]
+; GCN-DAG: s_mov_b32 s7, [[SAVE_Y]]
+; GCN-DAG: s_mov_b32 s8, [[SAVE_Z]]
 ; GCN: s_swappc_b64
 
 ; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4
diff --git a/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll b/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll
index f166d08935d5..50da6b44625c 100644
--- a/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll
+++ b/llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll
@@ -288,8 +288,8 @@ define void @too_many_args_use_workitem_id_x(
 ; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_x:
 ; GCN: enable_vgpr_workitem_id = 0
 
-; GCN: s_mov_b32 s32, s7
 ; GCN: s_mov_b32 s33, s7
+; GCN: s_mov_b32 s32, s33
 ; GCN: buffer_store_dword v0, off, s[0:3], s32 offset:8
 ; GCN: s_mov_b32 s4, s33
 ; GCN: s_swappc_b64
@@ -422,16 +422,15 @@ define void @too_many_args_use_workitem_id_x_byval(
 ; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_x_byval:
 ; GCN: enable_vgpr_workitem_id = 0
 
-; GCN: s_add_u32 s32, s7, 0x200{{$}}
-; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
-; GCN: s_add_u32 s32, s32, 0x100{{$}}
-
-
-; GCN: buffer_store_dword [[K]], off, s[0:3], s7 offset:4
-; GCN: buffer_store_dword v0, off, s[0:3], s32 offset:12
-; GCN: buffer_load_dword [[RELOAD_BYVAL:v[0-9]+]], off, s[0:3], s7 offset:4
-
 ; GCN: s_mov_b32 s33, s7
+; GCN: s_add_u32 s32, s33, 0x200{{$}}
+
+; GCN-DAG: s_add_u32 s32, s32, 0x100{{$}}
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
+; GCN: buffer_store_dword [[K]], off, s[0:3], s33 offset:4
+; GCN: buffer_store_dword v0, off, s[0:3], s32 offset:12
+
+; GCN: buffer_load_dword [[RELOAD_BYVAL:v[0-9]+]], off, s[0:3], s33 offset:4
 ; GCN: buffer_store_dword [[RELOAD_BYVAL]], off, s[0:3], s32 offset:4{{$}}
 ; GCN: v_mov_b32_e32 [[RELOAD_BYVAL]],
 ; GCN: s_swappc_b64
@@ -549,8 +548,8 @@ define void @too_many_args_use_workitem_id_xyz(
 ; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_xyz:
 ; GCN: enable_vgpr_workitem_id = 2
 
-; GCN: s_mov_b32 s32, s7
 ; GCN: s_mov_b32 s33, s7
+; GCN: s_mov_b32 s32, s33
 
 ; GCN-DAG: buffer_store_dword v0, off, s[0:3], s32 offset:8
 ; GCN-DAG: buffer_store_dword v1, off, s[0:3], s32 offset:12
@@ -644,8 +643,8 @@ define void @too_many_args_use_workitem_id_x_stack_yz(
 ; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_x_stack_yz:
 ; GCN: enable_vgpr_workitem_id = 2
 
-; GCN: s_mov_b32 s32, s7
 ; GCN: s_mov_b32 s33, s7
+; GCN: s_mov_b32 s32, s33
 
 ; GCN-DAG: v_mov_b32_e32 v31, v0
 ; GCN-DAG: buffer_store_dword v1, off, s[0:3], s32 offset:8
diff --git a/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll b/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
index 17f8c1a55a18..742c4f8af85d 100644
--- a/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
+++ b/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
@@ -5,49 +5,49 @@
 ; Test addressing modes when the scratch base is not a frame index.
diff --git a/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll b/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
index 17f8c1a55a18..742c4f8af85d 100644
--- a/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
+++ b/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
@@ -5,49 +5,49 @@
 ; Test addressing modes when the scratch base is not a frame index.
 
 ; GCN-LABEL: {{^}}store_private_offset_i8:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_i8() #0 {
   store volatile i8 5, i8* inttoptr (i32 8 to i8*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_i16:
-; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_i16() #0 {
   store volatile i16 5, i16* inttoptr (i32 8 to i16*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_i32:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_i32() #0 {
   store volatile i32 5, i32* inttoptr (i32 8 to i32*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_v2i32:
-; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_v2i32() #0 {
   store volatile <2 x i32> , <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_v4i32:
-; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_v4i32() #0 {
   store volatile <4 x i32> , <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_i8:
-; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_i8() #0 {
   %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}sextload_private_offset_i8:
-; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s3 offset:8
+; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
   %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
   %sextload = sext i8 %load to i32
@@ -56,7 +56,7 @@ define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0
 }
 
 ; GCN-LABEL: {{^}}zextload_private_offset_i8:
-; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s3 offset:8
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
   %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
   %zextload = zext i8 %load to i32
@@ -65,14 +65,14 @@ define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_i16:
-; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_i16() #0 {
   %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}sextload_private_offset_i16:
-; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s3 offset:8
+; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
   %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
   %sextload = sext i16 %load to i32
@@ -81,7 +81,7 @@ define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #
 }
 
 ; GCN-LABEL: {{^}}zextload_private_offset_i16:
-; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s3 offset:8
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
   %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
   %zextload = zext i16 %load to i32
@@ -90,28 +90,28 @@ define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_i32:
-; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_i32() #0 {
   %load = load volatile i32, i32* inttoptr (i32 8 to i32*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_v2i32:
-; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_v2i32() #0 {
   %load = load volatile <2 x i32>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_v4i32:
-; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_v4i32() #0 {
   %load = load volatile <4 x i32>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s1 offset:4095
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:4095
 define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
   store volatile i8 5, i8* inttoptr (i32 4095 to i8*)
   ret void
@@ -119,7 +119,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
 
 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus1:
 ; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s1 offen{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen{{$}}
 define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
   store volatile i8 5, i8* inttoptr (i32 4096 to i8*)
   ret void
@@ -127,7 +127,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
 
 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus2:
 ; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s1 offen offset:1{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen offset:1{{$}}
 define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
   store volatile i8 5, i8* inttoptr (i32 4097 to i8*)
   ret void
diff --git a/llvm/test/CodeGen/AMDGPU/multilevel-break.ll b/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
index dc3398449e0f..15de689b953e 100644
--- a/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
+++ b/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
@@ -78,7 +78,7 @@ ENDIF: ; preds = %LOOP
 ; Uses a copy intsead of an or
 ; GCN: s_mov_b64 [[COPY:s\[[0-9]+:[0-9]+\]]], [[BREAK_REG]]
-; GCN: s_or_b64 [[BREAK_REG]], exec, [[BREAK_REG]]
+; GCN: s_or_b64 [[BREAK_REG]], exec, [[COPY]]
 define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
 bb:
   %id = call i32 @llvm.amdgcn.workitem.id.x()
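The multilevel-break.ll change tightens the check pair above: FileCheck binds [[COPY]] at the s_mov_b64 and now requires the following s_or_b64 to read that copy instead of matching [[BREAK_REG]] a second time. Output of roughly this shape would satisfy it; the SGPR pair numbers are illustrative only:

    s_mov_b64 s[8:9], s[4:5]       ; binds [[COPY]] = s[8:9], with [[BREAK_REG]] = s[4:5]
    s_or_b64  s[4:5], exec, s[8:9] ; the or must name the copy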
diff --git a/llvm/test/CodeGen/AMDGPU/private-access-no-objects.ll b/llvm/test/CodeGen/AMDGPU/private-access-no-objects.ll
index 0b80fc33531a..cf0c7944d4cd 100644
--- a/llvm/test/CodeGen/AMDGPU/private-access-no-objects.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-access-no-objects.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=OPT %s
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=CI -check-prefix=OPT %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=iceland -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=OPTICELAND %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=iceland -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=OPT %s
 ; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=OPTNONE %s
 
 ; There are no stack objects, but still a private memory access. The
@@ -8,12 +8,10 @@
 ; shifted down to the end of the used registers.
 
 ; GCN-LABEL: {{^}}store_to_undef:
-; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s5 offen{{$}}
-; The -mcpu=iceland case doesn't copy-propagate the same as the other two opt cases because the temp registers %SGPR88_SGPR89_SGPR90_SGPR91 and %SGPR93 are marked as non-allocatable by this subtarget.
-; OPTICELAND-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
-; OPTICELAND-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPTICELAND-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
-; OPTICELAND: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
+; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
 
 ; -O0 should assume spilling, so the input scratch resource descriptor
 ; should be used directly without any copies.
@@ -26,21 +24,30 @@ define amdgpu_kernel void @store_to_undef() #0 {
 }
 
 ; GCN-LABEL: {{^}}store_to_inttoptr:
-; OPT: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:124{{$}}
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
+; OPT: buffer_store_dword v{{[0-9]+}}, off, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offset:124{{$}}
 define amdgpu_kernel void @store_to_inttoptr() #0 {
   store volatile i32 0, i32* inttoptr (i32 124 to i32*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_from_undef:
-; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s5 offen{{$}}
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
+; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
 define amdgpu_kernel void @load_from_undef() #0 {
   %ld = load volatile i32, i32* undef
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_from_inttoptr:
-; OPT: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s5 offset:124{{$}}
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
+; OPT: buffer_load_dword v{{[0-9]+}}, off, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offset:124{{$}}
 define amdgpu_kernel void @load_from_inttoptr() #0 {
   %ld = load volatile i32, i32* inttoptr (i32 124 to i32*)
   ret void
diff --git a/llvm/test/CodeGen/AMDGPU/ret.ll b/llvm/test/CodeGen/AMDGPU/ret.ll
index 2d673a9b0cdb..831c71dff79d 100644
--- a/llvm/test/CodeGen/AMDGPU/ret.ll
+++ b/llvm/test/CodeGen/AMDGPU/ret.ll
@@ -2,10 +2,10 @@
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
 
 ; GCN-LABEL: {{^}}vgpr:
-; GCN-DAG: v_mov_b32_e32 v1, v0
-; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
+; GCN: v_mov_b32_e32 v1, v0
+; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
+; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
 ; GCN: s_waitcnt expcnt(0)
-; GCN: v_add_f32_e32 v0, 1.0, v0
 ; GCN-NOT: s_endpgm
 define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
 bb:
@@ -204,13 +204,13 @@ bb:
 }
 
 ; GCN-LABEL: {{^}}both:
-; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
-; GCN-DAG: v_mov_b32_e32 v1, v0
-; GCN-DAG: s_mov_b32 s1, s2
-; GCN: s_waitcnt expcnt(0)
-; GCN: v_add_f32_e32 v0, 1.0, v0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
+; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
 ; GCN-DAG: s_add_i32 s0, s3, 2
-; GCN-DAG: s_mov_b32 s2, s3
+; GCN-DAG: s_mov_b32 s1, s2
+; GCN: s_mov_b32 s2, s3
+; GCN: s_waitcnt expcnt(0)
 ; GCN-NOT: s_endpgm
 define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
 bb:
diff --git a/llvm/test/CodeGen/ARM/atomic-op.ll b/llvm/test/CodeGen/ARM/atomic-op.ll
index 2337e835480d..644a7fbf8d9a 100644
--- a/llvm/test/CodeGen/ARM/atomic-op.ll
+++ b/llvm/test/CodeGen/ARM/atomic-op.ll
@@ -287,8 +287,7 @@ define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
   %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
   %oldval = extractvalue { i32, i1 } %pair, 0
-; CHECK-ARMV7: mov r[[ADDR:[0-9]+]], r0
-; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r0]
+; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
 ; CHECK-ARMV7: cmp [[OLDVAL]], r1
 ; CHECK-ARMV7: bne [[FAIL_BB:\.?LBB[0-9]+_[0-9]+]]
 ; CHECK-ARMV7: dmb ish
@@ -306,8 +305,7 @@ define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
 ; CHECK-ARMV7: dmb ish
 ; CHECK-ARMV7: bx lr
-; CHECK-T2: mov r[[ADDR:[0-9]+]], r0
-; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r0]
+; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
 ; CHECK-T2: cmp [[OLDVAL]], r1
 ; CHECK-T2: bne [[FAIL_BB:\.?LBB.*]]
 ; CHECK-T2: dmb ish
diff --git a/llvm/test/CodeGen/ARM/swifterror.ll b/llvm/test/CodeGen/ARM/swifterror.ll
index 15a831a37851..b02adf7912b5 100644
--- a/llvm/test/CodeGen/ARM/swifterror.ll
+++ b/llvm/test/CodeGen/ARM/swifterror.ll
@@ -181,7 +181,7 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float
 ; CHECK-APPLE: beq
 ; CHECK-APPLE: mov r0, #16
 ; CHECK-APPLE: malloc
-; CHECK-APPLE: strb r{{.*}}, [r0, #8]
+; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8]
 ; CHECK-APPLE: ble
 ; CHECK-APPLE: mov r8, [[ID]]
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
index 8cf2d9eeb8e0..655addb10a64 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -165,7 +165,7 @@ entry:
 ; MMR3: subu16 $5, $[[T19]], $[[T20]]
 
 ; MMR6: move $[[T0:[0-9]+]], $7
-; MMR6: sw $7, 8($sp)
+; MMR6: sw $[[T0]], 8($sp)
 ; MMR6: move $[[T1:[0-9]+]], $5
 ; MMR6: sw $4, 12($sp)
 ; MMR6: lw $[[T2:[0-9]+]], 48($sp)
diff --git a/llvm/test/CodeGen/PowerPC/fma-mutate.ll b/llvm/test/CodeGen/PowerPC/fma-mutate.ll
index 1d4695b31810..633afa45115a 100644
--- a/llvm/test/CodeGen/PowerPC/fma-mutate.ll
+++ b/llvm/test/CodeGen/PowerPC/fma-mutate.ll
@@ -14,8 +14,7 @@ define double @foo3(double %a) nounwind {
   ret double %r
 
 ; CHECK: @foo3
-; CHECK: fmr [[REG:[0-9]+]], [[REG2:[0-9]+]]
-; CHECK: xsnmsubadp [[REG]], {{[0-9]+}}, [[REG2]]
+; CHECK: xsnmsubadp [[REG:[0-9]+]], {{[0-9]+}}, [[REG]]
 ; CHECK: xsmaddmdp
 ; CHECK: xsmaddadp
 }
diff --git a/llvm/test/CodeGen/PowerPC/inlineasm-i64-reg.ll b/llvm/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
index d06edd66b246..aa944a8d4646 100644
--- a/llvm/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
+++ b/llvm/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
@@ -75,7 +75,7 @@ entry:
 
 ; CHECK-DAG: mr [[REG:[0-9]+]], 3
 ; CHECK-DAG: li 0, 1076
-; CHECK-DAG: stw 3,
+; CHECK: stw [[REG]],
 
 ; CHECK: #APP
 ; CHECK: sc
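The inlineasm-i64-reg.ll tightening is the same idiom used throughout this patch: instead of matching the original register (plain 3) at the store, the check now reuses the [[REG]] variable bound by the preceding mr, so the stw has to go through the copy. With an illustrative register choice it would match output like:

    mr 12, 3        # binds [[REG]] = 12
    li 0, 1076
    stw 12, 0(1)    # matched by "stw [[REG]],"; a bare "stw 3," is no longer expected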
diff --git a/llvm/test/CodeGen/PowerPC/tail-dup-layout.ll b/llvm/test/CodeGen/PowerPC/tail-dup-layout.ll
index dfeeb3726b95..9665901e874f 100644
--- a/llvm/test/CodeGen/PowerPC/tail-dup-layout.ll
+++ b/llvm/test/CodeGen/PowerPC/tail-dup-layout.ll
@@ -23,7 +23,7 @@ target triple = "powerpc64le-grtev4-linux-gnu"
 ;CHECK-LABEL: straight_test:
 ; test1 may have been merged with entry
 ;CHECK: mr [[TAGREG:[0-9]+]], 3
-;CHECK: andi. {{[0-9]+}}, [[TAGREG:[0-9]+]], 1
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
 ;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
 ;CHECK-NEXT: # %test2
 ;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
diff --git a/llvm/test/CodeGen/SPARC/32abi.ll b/llvm/test/CodeGen/SPARC/32abi.ll
index 985e77b86fd7..3807f84d4e92 100644
--- a/llvm/test/CodeGen/SPARC/32abi.ll
+++ b/llvm/test/CodeGen/SPARC/32abi.ll
@@ -156,9 +156,9 @@ define double @floatarg(double %a0, ; %i0,%i1
 ; HARD-NEXT: std %o0, [%sp+96]
 ; HARD-NEXT: st %o1, [%sp+92]
 ; HARD-NEXT: mov %i0, %o2
-; HARD-NEXT: mov %i1, %o3
+; HARD-NEXT: mov %o0, %o3
 ; HARD-NEXT: mov %o1, %o4
-; HARD-NEXT: mov %i1, %o5
+; HARD-NEXT: mov %o0, %o5
 ; HARD-NEXT: call floatarg
 ; HARD: std %f0, [%i4]
 ; SOFT: st %i0, [%sp+104]
diff --git a/llvm/test/CodeGen/SPARC/atomics.ll b/llvm/test/CodeGen/SPARC/atomics.ll
index ac095e60fa06..5e608e728c37 100644
--- a/llvm/test/CodeGen/SPARC/atomics.ll
+++ b/llvm/test/CodeGen/SPARC/atomics.ll
@@ -235,9 +235,8 @@ entry:
 
 ; CHECK-LABEL: test_load_add_i32
 ; CHECK: membar
-; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
-; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
-; CHECK: cas [%o0], [[V]], [[V2]]
+; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
+; CHECK: cas [%o0], [[V]], [[U]]
 ; CHECK: membar
 define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
 entry:
diff --git a/llvm/test/CodeGen/Thumb/thumb-shrink-wrapping.ll b/llvm/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
index 0a4aad3cc69b..c571e351a1ef 100644
--- a/llvm/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/Thumb/thumb-shrink-wrapping.ll
@@ -598,7 +598,7 @@ declare void @abort() #0
 define i32 @b_to_bx(i32 %value) {
 ; CHECK-LABEL: b_to_bx:
 ; DISABLE: push {r7, lr}
-; CHECK: cmp r0, #49
+; CHECK: cmp r1, #49
 ; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]]
 ; ENABLE: push {r7, lr}
diff --git a/llvm/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll b/llvm/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
index 31c6f1041f79..ca3eb9cda372 100644
--- a/llvm/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
+++ b/llvm/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
@@ -7,7 +7,7 @@ define i32 @f(i32 %a, i32 %b) {
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: imull %ecx, %edx
+; CHECK-NEXT: imull %edx, %edx
 ; CHECK-NEXT: imull %eax, %ecx
 ; CHECK-NEXT: imull %eax, %eax
 ; CHECK-NEXT: addl %edx, %eax
diff --git a/llvm/test/CodeGen/X86/arg-copy-elide.ll b/llvm/test/CodeGen/X86/arg-copy-elide.ll
index 603e50ff30a3..126f5a1c7976 100644
--- a/llvm/test/CodeGen/X86/arg-copy-elide.ll
+++ b/llvm/test/CodeGen/X86/arg-copy-elide.ll
@@ -106,7 +106,7 @@ entry:
 ; CHECK-DAG: movl %edx, %[[r1:[^ ]*]]
 ; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]]
 ; CHECK-DAG: movl %[[r2]], 4(%esp)
-; CHECK-DAG: movl %edx, (%esp)
+; CHECK-DAG: movl %[[r1]], (%esp)
 ; CHECK: movl %esp, %[[reg:[^ ]*]]
 ; CHECK: pushl %[[reg]]
 ; CHECK: calll _addrof_i64
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index 7da8502ac092..a2454aa5a048 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -407,6 +407,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
 ; SSE2-NEXT: pand %xmm0, %xmm2
 ; SSE2-NEXT: packuswb %xmm1, %xmm2
 ; SSE2-NEXT: packuswb %xmm10, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm1
 ; SSE2-NEXT: psrld $1, %xmm4
 ; SSE2-NEXT: psrld $1, %xmm12
 ; SSE2-NEXT: pand %xmm0, %xmm12
@@ -443,7 +444,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
 ; SSE2-NEXT: movdqu %xmm7, (%rax)
 ; SSE2-NEXT: movdqu %xmm11, (%rax)
 ; SSE2-NEXT: movdqu %xmm13, (%rax)
-; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: movdqu %xmm1, (%rax)
 ; SSE2-NEXT: retq
 ;
 ; AVX1-LABEL: avg_v64i8:
diff --git a/llvm/test/CodeGen/X86/avx-load-store.ll b/llvm/test/CodeGen/X86/avx-load-store.ll
index b1802c710fa7..e2dab79e6f19 100644
--- a/llvm/test/CodeGen/X86/avx-load-store.ll
+++ b/llvm/test/CodeGen/X86/avx-load-store.ll
@@ -12,11 +12,11 @@ define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>*
 ; CHECK-NEXT: movq %rdx, %r14
 ; CHECK-NEXT: movq %rsi, %r15
 ; CHECK-NEXT: movq %rdi, %rbx
-; CHECK-NEXT: vmovaps (%rdi), %ymm0
+; CHECK-NEXT: vmovaps (%rbx), %ymm0
 ; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
-; CHECK-NEXT: vmovaps (%rsi), %ymm1
+; CHECK-NEXT: vmovaps (%r15), %ymm1
 ; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
-; CHECK-NEXT: vmovaps (%rdx), %ymm2
+; CHECK-NEXT: vmovaps (%r14), %ymm2
 ; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
 ; CHECK-NEXT: callq dummy
 ; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
diff --git a/llvm/test/CodeGen/X86/avx512-bugfix-25270.ll b/llvm/test/CodeGen/X86/avx512-bugfix-25270.ll
index 36e0354c1cae..47384fa98843 100644
--- a/llvm/test/CodeGen/X86/avx512-bugfix-25270.ll
+++ b/llvm/test/CodeGen/X86/avx512-bugfix-25270.ll
@@ -9,10 +9,10 @@ define void @bar__512(<16 x i32>* %var) #0 {
 ; CHECK-NEXT: pushq %rbx
 ; CHECK-NEXT: subq $112, %rsp
 ; CHECK-NEXT: movq %rdi, %rbx
-; CHECK-NEXT: vmovups (%rdi), %zmm0
+; CHECK-NEXT: vmovups (%rbx), %zmm0
 ; CHECK-NEXT: vmovups %zmm0, (%rsp) ## 64-byte Spill
 ; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %zmm1
-; CHECK-NEXT: vmovaps %zmm1, (%rdi)
+; CHECK-NEXT: vmovaps %zmm1, (%rbx)
 ; CHECK-NEXT: callq _Print__512
 ; CHECK-NEXT: vmovups (%rsp), %zmm0 ## 64-byte Reload
 ; CHECK-NEXT: callq _Print__512
diff --git a/llvm/test/CodeGen/X86/avx512-calling-conv.ll b/llvm/test/CodeGen/X86/avx512-calling-conv.ll
index e1d602f5775f..138b8750633c 100644
--- a/llvm/test/CodeGen/X86/avx512-calling-conv.ll
+++ b/llvm/test/CodeGen/X86/avx512-calling-conv.ll
@@ -466,7 +466,7 @@ define i32 @test12(i32 %a1, i32 %a2, i32 %b1) {
 ; KNL_X32-NEXT: movl %edi, (%esp)
 ; KNL_X32-NEXT: calll _test11
 ; KNL_X32-NEXT: movl %eax, %ebx
-; KNL_X32-NEXT: movzbl %al, %eax
+; KNL_X32-NEXT: movzbl %bl, %eax
 ; KNL_X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
 ; KNL_X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
 ; KNL_X32-NEXT: movl %edi, (%esp)
diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll
index 805ea6bf152d..1ae57c613cda 100644
--- a/llvm/test/CodeGen/X86/avx512-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -1171,6 +1171,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; KNL-NEXT: kmovw %esi, %k0
 ; KNL-NEXT: kshiftlw $7, %k0, %k2
 ; KNL-NEXT: kshiftrw $15, %k2, %k2
+; KNL-NEXT: kmovw %k2, %eax
 ; KNL-NEXT: kshiftlw $6, %k0, %k0
 ; KNL-NEXT: kshiftrw $15, %k0, %k0
 ; KNL-NEXT: kmovw %k0, %ecx
@@ -1183,7 +1184,8 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; KNL-NEXT: kshiftlw $1, %k0, %k0
 ; KNL-NEXT: kshiftrw $1, %k0, %k0
-; KNL-NEXT: kshiftlw $7, %k2, %k1
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kshiftlw $7, %k1, %k1
 ; KNL-NEXT: korw %k1, %k0, %k1
 ; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT: vpmovqw %zmm0, %xmm0
@@ -1195,16 +1197,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; SKX-NEXT: kmovd %esi, %k1
 ; SKX-NEXT: kshiftlw $7, %k1, %k2
 ; SKX-NEXT: kshiftrw $15, %k2, %k2
+; SKX-NEXT: kmovd %k2, %eax
 ; SKX-NEXT: kshiftlw $6, %k1, %k1
 ; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: kmovd %k1, %ecx
 ; SKX-NEXT: vpmovm2q %k0, %zmm0
-; SKX-NEXT: vpmovm2q %k1, %zmm1
+; SKX-NEXT: kmovd %ecx, %k0
+; SKX-NEXT: vpmovm2q %k0, %zmm1
 ; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
 ; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
 ; SKX-NEXT: vpmovq2m %zmm2, %k0
 ; SKX-NEXT: kshiftlb $1, %k0, %k0
 ; SKX-NEXT: kshiftrb $1, %k0, %k0
-; SKX-NEXT: kshiftlb $7, %k2, %k1
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: kshiftlb $7, %k1, %k1
 ; SKX-NEXT: korb %k1, %k0, %k0
 ; SKX-NEXT: vpmovm2w %k0, %xmm0
 ; SKX-NEXT: vzeroupper
@@ -1216,6 +1222,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; AVX512BW-NEXT: kmovd %esi, %k0
 ; AVX512BW-NEXT: kshiftlw $7, %k0, %k2
 ; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
+; AVX512BW-NEXT: kmovd %k2, %eax
 ; AVX512BW-NEXT: kshiftlw $6, %k0, %k0
 ; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
 ; AVX512BW-NEXT: kmovd %k0, %ecx
@@ -1228,7 +1235,8 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; AVX512BW-NEXT: kshiftlw $1, %k0, %k0
 ; AVX512BW-NEXT: kshiftrw $1, %k0, %k0
-; AVX512BW-NEXT: kshiftlw $7, %k2, %k1
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: kshiftlw $7, %k1, %k1
 ; AVX512BW-NEXT: korw %k1, %k0, %k0
 ; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT: ## kill: %XMM0 %XMM0 %ZMM0
@@ -1241,16 +1249,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; AVX512DQ-NEXT: kmovw %esi, %k1
 ; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
 ; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
+; AVX512DQ-NEXT: kmovw %k2, %eax
 ; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
 ; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
 ; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
+; AVX512DQ-NEXT: kmovw %ecx, %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
 ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
 ; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
 ; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
 ; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
 ; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
-; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
+; AVX512DQ-NEXT: kmovw %eax, %k1
+; AVX512DQ-NEXT: kshiftlb $7, %k1, %k1
 ; AVX512DQ-NEXT: korb %k1, %k0, %k0
 ; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
 ; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
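All four run lines of test18 gain the same round trip: the bit extracted into %k2 is parked in a GPR while the remaining kshift/vpmovm2q work overwrites the mask registers, then it is reloaded into %k1 just before the final kor. Sketched with the KNL mnemonics, intervening work elided:

    kmovw %k2, %eax         ; save the extracted mask bit in a GPR
    ...                     ; mask-register-heavy shuffle work clobbers k1/k2
    kmovw %eax, %k1         ; restore it into a mask register
    kshiftlw $7, %k1, %k1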
diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index 026b81d1a89b..c6e1dbd8811b 100644
--- a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -2005,7 +2005,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
 ; AVX512F-32-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm2
 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %ecx, %eax
+; AVX512F-32-NEXT: movl %esi, %eax
 ; AVX512F-32-NEXT: shrl $30, %eax
 ; AVX512F-32-NEXT: kmovd %eax, %k1
 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
@@ -2016,7 +2016,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
 ; AVX512F-32-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm1
 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %ecx, %eax
+; AVX512F-32-NEXT: movl %esi, %eax
 ; AVX512F-32-NEXT: shrl $31, %eax
 ; AVX512F-32-NEXT: kmovd %eax, %k1
 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
@@ -2891,7 +2891,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
 ; AVX512F-32-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm2
 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %ecx, %eax
+; AVX512F-32-NEXT: movl %esi, %eax
 ; AVX512F-32-NEXT: shrl $30, %eax
 ; AVX512F-32-NEXT: kmovd %eax, %k1
 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
@@ -2902,7 +2902,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
 ; AVX512F-32-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm1
 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %ecx, %eax
+; AVX512F-32-NEXT: movl %esi, %eax
 ; AVX512F-32-NEXT: shrl $31, %eax
 ; AVX512F-32-NEXT: kmovd %eax, %k1
 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
index 88d189f3bf38..9b6401d1a76c 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
@@ -546,7 +546,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-SSSE3-NEXT: pslld $31, %xmm0
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -722,7 +722,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-SSSE3-NEXT: psllw $15, %xmm0
 ; SSE2-SSSE3-NEXT: psraw $15, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -1753,7 +1753,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
 ; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-SSSE3-NEXT: pslld $31, %xmm0
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -2103,7 +2103,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-SSSE3-NEXT: psllw $15, %xmm0
 ; SSE2-SSSE3-NEXT: psraw $15, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
index 464e942f4cb3..aa9e60df1404 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
@@ -649,7 +649,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -808,7 +808,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1]
 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -1667,7 +1667,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
 ; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -2008,7 +2008,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1]
 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
diff --git a/llvm/test/CodeGen/X86/buildvec-insertvec.ll b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
index 076df3a90032..cd5abc1373b9 100644
--- a/llvm/test/CodeGen/X86/buildvec-insertvec.ll
+++ b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
@@ -38,7 +38,7 @@ define <4 x float> @test_negative_zero_1(<4 x float> %A) {
 ; SSE2-LABEL: test_negative_zero_1:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSE2-NEXT: xorps %xmm2, %xmm2
diff --git a/llvm/test/CodeGen/X86/combine-fcopysign.ll b/llvm/test/CodeGen/X86/combine-fcopysign.ll
index c87ca0825a31..43e09bfe5fea 100644
--- a/llvm/test/CodeGen/X86/combine-fcopysign.ll
+++ b/llvm/test/CodeGen/X86/combine-fcopysign.ll
@@ -231,8 +231,8 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
 ; SSE-NEXT: cvtss2sd %xmm2, %xmm4
 ; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
 ; SSE-NEXT: movaps %xmm2, %xmm6
-; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm2[1],xmm6[1]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3]
+; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
 ; SSE-NEXT: movaps {{.*#+}} xmm7
 ; SSE-NEXT: movaps %xmm0, %xmm2
 ; SSE-NEXT: andps %xmm7, %xmm2
@@ -247,7 +247,7 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
 ; SSE-NEXT: orps %xmm0, %xmm4
 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm4[0]
 ; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
 ; SSE-NEXT: andps %xmm7, %xmm0
 ; SSE-NEXT: cvtss2sd %xmm3, %xmm3
 ; SSE-NEXT: andps %xmm8, %xmm3
@@ -294,7 +294,7 @@ define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x doubl
 ; SSE-NEXT: orps %xmm6, %xmm1
 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT: andps %xmm5, %xmm1
 ; SSE-NEXT: xorps %xmm6, %xmm6
 ; SSE-NEXT: cvtsd2ss %xmm2, %xmm6
diff --git a/llvm/test/CodeGen/X86/complex-fastmath.ll b/llvm/test/CodeGen/X86/complex-fastmath.ll
index d3b9c7b9b5fd..d31707260a0a 100644
--- a/llvm/test/CodeGen/X86/complex-fastmath.ll
+++ b/llvm/test/CodeGen/X86/complex-fastmath.ll
@@ -14,7 +14,7 @@ define <2 x float> @complex_square_f32(<2 x float>) #0 {
 ; SSE: # BB#0:
 ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: addss %xmm0, %xmm2
+; SSE-NEXT: addss %xmm2, %xmm2
 ; SSE-NEXT: mulss %xmm1, %xmm2
 ; SSE-NEXT: mulss %xmm0, %xmm0
 ; SSE-NEXT: mulss %xmm1, %xmm1
@@ -58,9 +58,9 @@ define <2 x double> @complex_square_f64(<2 x double>) #0 {
 ; SSE-LABEL: complex_square_f64:
 ; SSE: # BB#0:
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: addsd %xmm0, %xmm2
+; SSE-NEXT: addsd %xmm2, %xmm2
 ; SSE-NEXT: mulsd %xmm1, %xmm2
 ; SSE-NEXT: mulsd %xmm0, %xmm0
 ; SSE-NEXT: mulsd %xmm1, %xmm1
@@ -161,9 +161,9 @@ define <2 x double> @complex_mul_f64(<2 x double>, <2 x double>) #0 {
 ; SSE-LABEL: complex_mul_f64:
 ; SSE: # BB#0:
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
 ; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
 ; SSE-NEXT: movaps %xmm3, %xmm4
 ; SSE-NEXT: mulsd %xmm0, %xmm4
 ; SSE-NEXT: mulsd %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/divide-by-constant.ll b/llvm/test/CodeGen/X86/divide-by-constant.ll
index 579aa101736b..ee53dd3233d3 100644
--- a/llvm/test/CodeGen/X86/divide-by-constant.ll
+++ b/llvm/test/CodeGen/X86/divide-by-constant.ll
@@ -318,7 +318,7 @@ define i64 @PR23590(i64 %x) nounwind {
 ; X64: # BB#0: # %entry
 ; X64-NEXT: movq %rdi, %rcx
 ; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F
-; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rcx, %rax
 ; X64-NEXT: mulq %rdx
 ; X64-NEXT: shrq $12, %rdx
 ; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039
diff --git a/llvm/test/CodeGen/X86/fmaxnum.ll b/llvm/test/CodeGen/X86/fmaxnum.ll
index 665fb708e4b0..ebfbd064572a 100644
--- a/llvm/test/CodeGen/X86/fmaxnum.ll
+++ b/llvm/test/CodeGen/X86/fmaxnum.ll
@@ -18,7 +18,7 @@ declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)
 
 ; CHECK-LABEL: @test_fmaxf
 ; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
 ; SSE-NEXT: movaps %xmm2, %xmm3
 ; SSE-NEXT: andps %xmm1, %xmm3
 ; SSE-NEXT: maxss %xmm0, %xmm1
@@ -47,7 +47,7 @@ define float @test_fmaxf_minsize(float %x, float %y) minsize {
 
 ; CHECK-LABEL: @test_fmax
 ; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm2, %xmm2
 ; SSE-NEXT: movapd %xmm2, %xmm3
 ; SSE-NEXT: andpd %xmm1, %xmm3
 ; SSE-NEXT: maxsd %xmm0, %xmm1
@@ -74,7 +74,7 @@ define x86_fp80 @test_fmaxl(x86_fp80 %x, x86_fp80 %y) {
 
 ; CHECK-LABEL: @test_intrinsic_fmaxf
 ; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
 ; SSE-NEXT: movaps %xmm2, %xmm3
 ; SSE-NEXT: andps %xmm1, %xmm3
 ; SSE-NEXT: maxss %xmm0, %xmm1
@@ -95,7 +95,7 @@ define float @test_intrinsic_fmaxf(float %x, float %y) {
 
 ; CHECK-LABEL: @test_intrinsic_fmax
 ; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm2, %xmm2
 ; SSE-NEXT: movapd %xmm2, %xmm3
 ; SSE-NEXT: andpd %xmm1, %xmm3
 ; SSE-NEXT: maxsd %xmm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/fminnum.ll b/llvm/test/CodeGen/X86/fminnum.ll
index e0e5df702df6..afe8b804f267 100644
--- a/llvm/test/CodeGen/X86/fminnum.ll
+++ b/llvm/test/CodeGen/X86/fminnum.ll
@@ -18,7 +18,7 @@ declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)
 
 ; CHECK-LABEL: @test_fminf
 ; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
 ; SSE-NEXT: movaps %xmm2, %xmm3
 ; SSE-NEXT: andps %xmm1, %xmm3
 ; SSE-NEXT: minss %xmm0, %xmm1
@@ -40,7 +40,7 @@ define float @test_fminf(float %x, float %y) {
 
 ; CHECK-LABEL: @test_fmin
 ; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm2, %xmm2
 ; SSE-NEXT: movapd %xmm2, %xmm3
 ; SSE-NEXT: andpd %xmm1, %xmm3
 ; SSE-NEXT: minsd %xmm0, %xmm1
@@ -67,7 +67,7 @@ define x86_fp80 @test_fminl(x86_fp80 %x, x86_fp80 %y) {
 
 ; CHECK-LABEL: @test_intrinsic_fminf
 ; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm0, %xmm2
+; SSE-NEXT: cmpunordss %xmm2, %xmm2
 ; SSE-NEXT: movaps %xmm2, %xmm3
 ; SSE-NEXT: andps %xmm1, %xmm3
 ; SSE-NEXT: minss %xmm0, %xmm1
@@ -87,7 +87,7 @@ define float @test_intrinsic_fminf(float %x, float %y) {
 
 ; CHECK-LABEL: @test_intrinsic_fmin
 ; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm0, %xmm2
+; SSE-NEXT: cmpunordsd %xmm2, %xmm2
 ; SSE-NEXT: movapd %xmm2, %xmm3
 ; SSE-NEXT: andpd %xmm1, %xmm3
 ; SSE-NEXT: minsd %xmm0, %xmm1
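The fmaxnum/fminnum updates are all the same one-operand change: after movaps copies %xmm0 into %xmm2, the unordered compare is expected to test the copy against itself instead of against %xmm0. Since %xmm2 holds the same value at that point, both forms compute the NaN mask here; the matched pair now reads:

    movaps %xmm0, %xmm2
    cmpunordss %xmm2, %xmm2   # self-compare of the copy; was cmpunordss %xmm0, %xmm2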
diff --git a/llvm/test/CodeGen/X86/fp128-i128.ll b/llvm/test/CodeGen/X86/fp128-i128.ll
index a249cd191791..98082ec611d4 100644
--- a/llvm/test/CodeGen/X86/fp128-i128.ll
+++ b/llvm/test/CodeGen/X86/fp128-i128.ll
@@ -227,7 +227,7 @@ define fp128 @TestI128_4(fp128 %x) #0 {
 ; CHECK: # BB#0: # %entry
 ; CHECK-NEXT: subq $40, %rsp
 ; CHECK-NEXT: movaps %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT: movq $0, (%rsp)
@@ -275,7 +275,7 @@ define fp128 @acosl(fp128 %x) #0 {
 ; CHECK: # BB#0: # %entry
 ; CHECK-NEXT: subq $40, %rsp
 ; CHECK-NEXT: movaps %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT: movq $0, (%rsp)
diff --git a/llvm/test/CodeGen/X86/haddsub-2.ll b/llvm/test/CodeGen/X86/haddsub-2.ll
index 5ab976c49d9e..fd023d018031 100644
--- a/llvm/test/CodeGen/X86/haddsub-2.ll
+++ b/llvm/test/CodeGen/X86/haddsub-2.ll
@@ -908,16 +908,16 @@ define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
 ; SSE-LABEL: not_a_hsub_2:
 ; SSE: # BB#0:
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
 ; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
 ; SSE-NEXT: subss %xmm3, %xmm2
 ; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
 ; SSE-NEXT: subss %xmm3, %xmm0
 ; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
 ; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
 ; SSE-NEXT: subss %xmm4, %xmm3
 ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
 ; SSE-NEXT: subss %xmm4, %xmm1
@@ -965,10 +965,10 @@ define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
 ; SSE-LABEL: not_a_hsub_3:
 ; SSE: # BB#0:
 ; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
 ; SSE-NEXT: subsd %xmm2, %xmm1
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
 ; SSE-NEXT: subsd %xmm0, %xmm2
 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
 ; SSE-NEXT: movapd %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/haddsub-undef.ll b/llvm/test/CodeGen/X86/haddsub-undef.ll
index b3d3e431d3c6..091d1a22dbcd 100644
--- a/llvm/test/CodeGen/X86/haddsub-undef.ll
+++ b/llvm/test/CodeGen/X86/haddsub-undef.ll
@@ -103,7 +103,7 @@ define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
 ; SSE-LABEL: test5_undef:
 ; SSE: # BB#0:
 ; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT: addsd %xmm0, %xmm1
 ; SSE-NEXT: movapd %xmm1, %xmm0
 ; SSE-NEXT: retq
@@ -168,7 +168,7 @@ define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
 ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE-NEXT: addss %xmm0, %xmm1
 ; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; SSE-NEXT: addss %xmm2, %xmm0
 ; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index 8e7edd579fbb..b7c43d3b2e3e 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -386,7 +386,7 @@ define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
 ; CHECK-LIBCALL-NEXT: pushq %rbx
 ; CHECK-LIBCALL-NEXT: subq $48, %rsp
 ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
-; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
+; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi
 ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
 ; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
 ; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi
@@ -472,7 +472,7 @@ define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
 ; CHECK-LIBCALL-NEXT: pushq %rbx
 ; CHECK-LIBCALL-NEXT: subq $16, %rsp
 ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
-; CHECK-LIBCALL-NEXT: movzwl 4(%rdi), %edi
+; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi
 ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
 ; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
 ; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi
@@ -657,7 +657,7 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
 ; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill
 ; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp
 ; CHECK-I686-NEXT: movaps %xmm0, %xmm1
-; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
+; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
 ; CHECK-I686-NEXT: movss %xmm1, (%esp)
 ; CHECK-I686-NEXT: calll __gnu_f2h_ieee
 ; CHECK-I686-NEXT: movw %ax, %si
diff --git a/llvm/test/CodeGen/X86/inline-asm-fpstack.ll b/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
index 1e1804767938..b107aa09d259 100644
--- a/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
@@ -162,7 +162,6 @@ define void @testPR4459(x86_fp80 %a) {
 ; CHECK-NEXT: fstpt (%esp)
 ; CHECK-NEXT: calll _ceil
 ; CHECK-NEXT: fld %st(0)
-; CHECK-NEXT: fxch %st(1)
 ; CHECK-NEXT: ## InlineAsm Start
 ; CHECK-NEXT: fistpl %st(0)
 ; CHECK-NEXT: ## InlineAsm End
diff --git a/llvm/test/CodeGen/X86/ipra-local-linkage.ll b/llvm/test/CodeGen/X86/ipra-local-linkage.ll
index 787b16f0d5b3..a394ed3e3858 100644
--- a/llvm/test/CodeGen/X86/ipra-local-linkage.ll
+++ b/llvm/test/CodeGen/X86/ipra-local-linkage.ll
@@ -24,7 +24,7 @@ define void @bar(i32 %X) {
   call void @foo()
 ; CHECK-LABEL: bar:
 ; CHECK: callq foo
-; CHECK-NEXT: movl %edi, %r15d
+; CHECK-NEXT: movl %eax, %r15d
   call void asm sideeffect "movl $0, %r12d", "{r15}~{r12}"(i32 %X)
   ret void
 }
diff --git a/llvm/test/CodeGen/X86/localescape.ll b/llvm/test/CodeGen/X86/localescape.ll
index a49af0898680..10ab8dd9672f 100644
--- a/llvm/test/CodeGen/X86/localescape.ll
+++ b/llvm/test/CodeGen/X86/localescape.ll
@@ -27,7 +27,7 @@ define void @print_framealloc_from_fp(i8* %fp) {
 
 ; X64-LABEL: print_framealloc_from_fp:
 ; X64: movq %rcx, %[[parent_fp:[a-z]+]]
-; X64: movl .Lalloc_func$frame_escape_0(%rcx), %edx
+; X64: movl .Lalloc_func$frame_escape_0(%[[parent_fp]]), %edx
 ; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]]
 ; X64: movq %[[str]], %rcx
 ; X64: callq printf
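localescape.ll gets the same treatment as half.ll and avx-load-store.ll above: once %rcx is captured into [[parent_fp]], later memory operands must be written against that variable rather than hard-coding %rcx. Assuming the copy lands in %rax (the register choice is hypothetical), the matched code would look like:

    movq %rcx, %rax                               # binds [[parent_fp]] = rax
    movl .Lalloc_func$frame_escape_0(%rax), %edx  # address goes through the copy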
diff --git a/llvm/test/CodeGen/X86/mul-i1024.ll b/llvm/test/CodeGen/X86/mul-i1024.ll
index fcaf1a2da327..87661004373f 100644
--- a/llvm/test/CodeGen/X86/mul-i1024.ll
+++ b/llvm/test/CodeGen/X86/mul-i1024.ll
@@ -159,7 +159,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
 ; X32-NEXT: pushl %esi
 ; X32-NEXT: movl %esi, %ebx
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: pushl %edi
 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -752,7 +752,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: pushl $0
 ; X32-NEXT: pushl %edi
 ; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %esi
 ; X32-NEXT: pushl $0
 ; X32-NEXT: pushl $0
 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -898,6 +898,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: pushl $0
 ; X32-NEXT: pushl $0
 ; X32-NEXT: pushl %edi
+; X32-NEXT: movl %edi, %ebx
 ; X32-NEXT: pushl %esi
 ; X32-NEXT: pushl $0
 ; X32-NEXT: pushl $0
@@ -909,7 +910,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: pushl $0
 ; X32-NEXT: pushl $0
-; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %ebx
 ; X32-NEXT: pushl %esi
 ; X32-NEXT: pushl $0
 ; X32-NEXT: pushl $0
@@ -1364,7 +1365,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: pushl $0
 ; X32-NEXT: pushl $0
 ; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %ebx
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
 ; X32-NEXT: pushl %esi
 ; X32-NEXT: pushl $0
@@ -2441,7 +2442,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: adcl %edi, %eax
 ; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
 ; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
@@ -4264,6 +4265,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq $0, %rbp
 ; X64-NEXT: addq %rcx, %rbx
 ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, %r11
 ; X64-NEXT: adcq %rdi, %rbp
 ; X64-NEXT: setb %bl
 ; X64-NEXT: movzbl %bl, %ebx
@@ -4273,12 +4275,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: mulq %r8
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rcx, %r12
-; X64-NEXT: movq %rcx, %r8
+; X64-NEXT: movq %r11, %r12
+; X64-NEXT: movq %r11, %r8
 ; X64-NEXT: addq %rax, %r12
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: movq %rdi, (%rsp) # 8-byte Spill
+; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
 ; X64-NEXT: adcq %rdx, %rax
 ; X64-NEXT: addq %rbp, %r12
 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
@@ -4307,7 +4309,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rdx, %rbx
 ; X64-NEXT: movq 16(%rsi), %rax
 ; X64-NEXT: movq %rsi, %r13
-; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
@@ -4320,7 +4322,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rbx, %r11
 ; X64-NEXT: movq %r8, %rax
 ; X64-NEXT: movq %r8, %rbp
-; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: addq %rdi, %rax
 ; X64-NEXT: movq %r9, %rax
 ; X64-NEXT: adcq %rcx, %rax
@@ -4332,7 +4334,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rdx, %rsi
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: addq %rdi, %rax
-; X64-NEXT: movq %rdx, %rax
+; X64-NEXT: movq %rdi, %r9
+; X64-NEXT: movq %rsi, %rax
 ; X64-NEXT: adcq %rcx, %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq 32(%r13), %rax
@@ -4348,10 +4351,9 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rdx, %rax
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: addq %rdi, %rax
+; X64-NEXT: addq %r9, %rax
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
 ; X64-NEXT: adcq %r15, %rax
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
@@ -4369,7 +4371,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: addq %rsi, %r11
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %rbx, %r11
+; X64-NEXT: addq %rcx, %r11
 ; X64-NEXT: adcq %rsi, %rbp
 ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: setb %bl
@@ -4390,11 +4392,11 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rbx, %r10
 ; X64-NEXT: movq %rcx, %rdx
 ; X64-NEXT: movq %rcx, %r12
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: addq %r9, %rdx
 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %r11, %r8
-; X64-NEXT: adcq %r11, %r15
+; X64-NEXT: adcq %r8, %r15
 ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: adcq %rax, %r14
 ; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
@@ -4490,12 +4492,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rdx, %r12
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: mulq %r10
+; X64-NEXT: movq %r10, %rbp
+; X64-NEXT: mulq %rbp
 ; X64-NEXT: movq %rdx, %rsi
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
 ; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r10
+; X64-NEXT: mulq %rbp
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: addq %rsi, %rbx
@@ -4522,7 +4525,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq $0, %r15
 ; X64-NEXT: adcq $0, %r12
 ; X64-NEXT: movq %r10, %rbx
-; X64-NEXT: movq %r10, %rax
+; X64-NEXT: movq %rbx, %rax
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
 ; X64-NEXT: mulq %r11
 ; X64-NEXT: movq %rdx, %rcx
@@ -4539,7 +4542,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rbx, %rax
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rcx, %rbx
-; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq %rdx, %rcx
 ; X64-NEXT: movq %rax, %r8
 ; X64-NEXT: addq %rbp, %r8
@@ -4570,7 +4573,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: movq %rcx, %rax
 ; X64-NEXT: movq %r11, %rsi
-; X64-NEXT: mulq %r11
+; X64-NEXT: mulq %rsi
 ; X64-NEXT: movq %rdx, %r11
 ; X64-NEXT: movq %rax, %r13
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
@@ -4650,12 +4653,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq %rdx, %r10
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: mulq %r11
+; X64-NEXT: movq %r11, %rbp
+; X64-NEXT: mulq %rbp
 ; X64-NEXT: movq %rdx, %rdi
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
 ; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %r11
+; X64-NEXT: mulq %rbp
 ; X64-NEXT: movq %rdx, %rbp
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: addq %rdi, %rbx
@@ -4785,7 +4789,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rdx, %rsi
 ; X64-NEXT: movq %rax, %r14
 ; X64-NEXT: movq %r8, %rbp
-; X64-NEXT: movq %r8, %rax
+; X64-NEXT: movq %rbp, %rax
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rcx, %r11
 ; X64-NEXT: movq %rdx, %rbx
@@ -4845,7 +4849,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq $0, %r9
 ; X64-NEXT: adcq $0, %r10
 ; X64-NEXT: movq %rbp, %rsi
-; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %rsi, %rax
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rdx, %r14
@@ -4902,8 +4906,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: adcq $0, %r15
 ; X64-NEXT: movq %rbp, %rax
 ; X64-NEXT: movq %r8, %rdi
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %rdi
 ; X64-NEXT: movq %rdx, %r9
 ; X64-NEXT: movq %rax, %r8
 ; X64-NEXT: addq %rbx, %r8
@@ -4986,12 +4990,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rcx, %r14
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: mulq %r10
+; X64-NEXT: movq %r10, %rdi
+; X64-NEXT: mulq %rdi
 ; X64-NEXT: movq %rdx, %r11
 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
 ; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %r10
+; X64-NEXT: mulq %rdi
 ; X64-NEXT: movq %rdx, %rdi
 ; X64-NEXT: movq %rax, %rbx
 ; X64-NEXT: addq %r11, %rbx
@@ -5019,7 +5024,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: adcq $0, %r14
 ; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r13, %rax
+; X64-NEXT: movq %r13, %rbx
+; X64-NEXT: movq %rbx, %rax
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
 ; X64-NEXT: mulq %rcx
 ; X64-NEXT: movq %rdx, %r8
@@ -5032,7 +5038,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: movq %rax, %rcx
 ; X64-NEXT: addq %r8, %rcx
 ; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %r13, %rax
+; X64-NEXT: movq %rbx, %rax
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
 ; X64-NEXT: mulq %r13
 ; X64-NEXT: movq %rdx, %rbx
@@ -5066,12 +5072,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
 ; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
 ; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: mulq %r10
+; X64-NEXT: movq %r10, %rsi
+; X64-NEXT: mulq %rsi
 ; X64-NEXT: movq %rdx, %rcx
 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
 ; X64-NEXT: movq %r8, %rax
-; X64-NEXT: mulq %r10
+; X64-NEXT: mulq %rsi
 ; X64-NEXT: movq %rdx, %rsi
 ; X64-NEXT: movq %rax, %rdi
 ; X64-NEXT: addq %rcx, %rdi
@@ -5147,7 +5154,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
{ ; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rcx, %r10 -; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %rdi ; X64-NEXT: addq %rsi, %rdi @@ -5159,16 +5166,16 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload -; X64-NEXT: addq %rax, %r12 +; X64-NEXT: addq %rbx, %r12 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload -; X64-NEXT: adcq %rdx, %r15 +; X64-NEXT: adcq %r14, %r15 ; X64-NEXT: addq %rdi, %r12 ; X64-NEXT: adcq %rcx, %r15 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %r11, %rsi -; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: mulq %r11 +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload @@ -5232,7 +5239,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %r9 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload -; X64-NEXT: addq %rax, %rbp +; X64-NEXT: addq %r9, %rbp ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: addq %rsi, %rbp @@ -5410,7 +5417,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: movq 88(%rsi), %rax ; X64-NEXT: movq %rsi, %r9 ; X64-NEXT: movq %rax, %rsi -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: movq %rdx, %rbp @@ -5446,12 +5453,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: adcq %r8, %r10 ; X64-NEXT: addq %rbx, %rsi ; X64-NEXT: adcq %rbp, %r10 -; X64-NEXT: movq 64(%r9), %r13 +; X64-NEXT: movq %r9, %rdi +; X64-NEXT: movq 64(%rdi), %r13 ; X64-NEXT: movq %r13, %rax ; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rcx -; X64-NEXT: movq 72(%r9), %r9 +; X64-NEXT: movq 72(%rdi), %r9 ; X64-NEXT: movq %r9, %rax ; X64-NEXT: mulq %r11 ; X64-NEXT: movq %rdx, %rbp @@ -5479,8 +5487,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rax, %r15 ; X64-NEXT: movq %r12, %rcx -; X64-NEXT: addq %rax, %rcx -; X64-NEXT: adcq %rdx, %r8 +; X64-NEXT: addq %r15, %rcx +; X64-NEXT: adcq %r11, %r8 ; X64-NEXT: addq %rbp, %rcx ; X64-NEXT: adcq %rbx, %r8 ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload @@ -5532,13 +5540,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: setb %r10b ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq %rsi, %rax -; X64-NEXT: mulq %r8 +; X64-NEXT: movq %r8, %rdi +; X64-NEXT: mulq %rdi ; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rax, %r9 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: movq %rbp, %rax -; X64-NEXT: mulq %r8 -; X64-NEXT: movq %r8, %r12 +; X64-NEXT: mulq %rdi +; X64-NEXT: movq %rdi, %r12 ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %rbx ; X64-NEXT: addq %rcx, %rbx @@ -5577,7 +5586,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind { ; X64-NEXT: imulq 
%rcx, %rdi ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %r12, %rsi -; X64-NEXT: mulq %r12 +; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rax, %r9 ; X64-NEXT: addq %rdi, %rdx ; X64-NEXT: movq 104(%rbp), %r8 diff --git a/llvm/test/CodeGen/X86/mul-i512.ll b/llvm/test/CodeGen/X86/mul-i512.ll index 40a29ddef2dd..3da17b69ffb5 100644 --- a/llvm/test/CodeGen/X86/mul-i512.ll +++ b/llvm/test/CodeGen/X86/mul-i512.ll @@ -909,7 +909,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: movq 8(%rsi), %rbp ; X64-NEXT: movq %r15, %rax ; X64-NEXT: movq %rdx, %rsi -; X64-NEXT: mulq %rdx +; X64-NEXT: mulq %rsi ; X64-NEXT: movq %rdx, %r9 ; X64-NEXT: movq %rax, %r8 ; X64-NEXT: movq %r11, %rax @@ -932,24 +932,23 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: movq %r11, %rax ; X64-NEXT: mulq %rbp ; X64-NEXT: movq %rbp, %r14 -; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rax, %rbp ; X64-NEXT: addq %rcx, %rbp ; X64-NEXT: adcq %rbx, %rsi ; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: movq %r10, %rbx -; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: movq %r10, %rax +; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %rbx, %rax ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, %r13 ; X64-NEXT: movq %rax, %r10 ; X64-NEXT: movq %r15, %rax ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill -; X64-NEXT: # kill: %RAX -; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %r15 +; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: addq %r10, %r15 ; X64-NEXT: adcq %r13, %rdx ; X64-NEXT: addq %rbp, %r15 @@ -988,8 +987,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: mulq %rdx ; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq %rax, %r11 -; X64-NEXT: addq %rax, %r10 -; X64-NEXT: adcq %rdx, %r13 +; X64-NEXT: addq %r11, %r10 +; X64-NEXT: adcq %r14, %r13 ; X64-NEXT: addq %rbp, %r10 ; X64-NEXT: adcq %rsi, %r13 ; X64-NEXT: addq %r8, %r10 @@ -1001,7 +1000,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: movq 16(%rsi), %r8 ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %r9 -; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: mulq %r8 ; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rax, %r12 @@ -1032,7 +1031,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind { ; X64-NEXT: mulq %rcx ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, %rbp -; X64-NEXT: addq %rax, %r11 +; X64-NEXT: addq %rbp, %r11 ; X64-NEXT: adcq %rdx, %r14 ; X64-NEXT: addq %r9, %r11 ; X64-NEXT: adcq %rbx, %r14 diff --git a/llvm/test/CodeGen/X86/mul128.ll b/llvm/test/CodeGen/X86/mul128.ll index ff0d558347fc..2b3a13509b3c 100644 --- a/llvm/test/CodeGen/X86/mul128.ll +++ b/llvm/test/CodeGen/X86/mul128.ll @@ -7,7 +7,7 @@ define i128 @foo(i128 %t, i128 %u) { ; X64-NEXT: movq %rdx, %r8 ; X64-NEXT: imulq %rdi, %rcx ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: mulq %rdx +; X64-NEXT: mulq %r8 ; X64-NEXT: addq %rcx, %rdx ; X64-NEXT: imulq %r8, %rsi ; X64-NEXT: addq %rsi, %rdx diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll index 71cc185452f0..72f61bd6f484 100644 --- a/llvm/test/CodeGen/X86/pmul.ll +++ b/llvm/test/CodeGen/X86/pmul.ll @@ -9,7 +9,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind { ; SSE2-LABEL: 
mul_v16i8c: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: pmullw %xmm2, %xmm1 @@ -143,10 +143,10 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind { ; SSE2-LABEL: mul_v16i8: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm3 ; SSE2-NEXT: pmullw %xmm2, %xmm3 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] @@ -386,7 +386,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind { ; SSE2-LABEL: mul_v32i8c: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: pmullw %xmm3, %xmm2 @@ -398,7 +398,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind { ; SSE2-NEXT: pand %xmm4, %xmm0 ; SSE2-NEXT: packuswb %xmm2, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: pand %xmm4, %xmm2 @@ -567,10 +567,10 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind { ; SSE2-LABEL: mul_v32i8: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] @@ -583,10 +583,10 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind { ; SSE2-NEXT: pand %xmm4, %xmm0 ; SSE2-NEXT: packuswb %xmm5, %xmm0 ; SSE2-NEXT: movdqa 
%xmm3, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm2, %xmm5 ; SSE2-NEXT: pand %xmm4, %xmm5 @@ -774,7 +774,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; SSE2-LABEL: mul_v64i8c: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: pmullw %xmm4, %xmm6 @@ -786,7 +786,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; SSE2-NEXT: pand %xmm5, %xmm0 ; SSE2-NEXT: packuswb %xmm6, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6 @@ -796,7 +796,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; SSE2-NEXT: pand %xmm5, %xmm1 ; SSE2-NEXT: packuswb %xmm6, %xmm1 ; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6 @@ -806,7 +806,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; SSE2-NEXT: pand %xmm5, %xmm2 ; SSE2-NEXT: packuswb %xmm6, %xmm2 ; SSE2-NEXT: movdqa %xmm3, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6 @@ -821,7 +821,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; SSE41: # BB#0: # %entry ; SSE41-NEXT: movdqa %xmm1, %xmm4 ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: pmovsxbw %xmm0, %xmm0 +; SSE41-NEXT: pmovsxbw %xmm1, %xmm0 ; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117] ; SSE41-NEXT: pmullw %xmm6, %xmm0 ; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] @@ -939,10 +939,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind { ; SSE2-LABEL: mul_v64i8: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm4, %xmm8 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = 
xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm8 ; SSE2-NEXT: movdqa %xmm0, %xmm9 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm9 ; SSE2-NEXT: pmullw %xmm8, %xmm9 ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] @@ -955,10 +955,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind { ; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: packuswb %xmm9, %xmm0 ; SSE2-NEXT: movdqa %xmm5, %xmm9 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm9 ; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: pmullw %xmm9, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm4 @@ -970,10 +970,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind { ; SSE2-NEXT: pand %xmm8, %xmm1 ; SSE2-NEXT: packuswb %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm6, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: pand %xmm8, %xmm5 @@ -985,10 +985,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind { ; SSE2-NEXT: pand %xmm8, %xmm2 ; SSE2-NEXT: packuswb %xmm5, %xmm2 ; SSE2-NEXT: movdqa %xmm7, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: movdqa %xmm3, %xmm5 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: pand %xmm8, %xmm5 @@ -1006,7 +1006,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind { ; SSE41-NEXT: movdqa %xmm1, %xmm8 ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: pmovsxbw %xmm4, %xmm9 -; SSE41-NEXT: 
pmovsxbw %xmm0, %xmm0 +; SSE41-NEXT: pmovsxbw %xmm1, %xmm0 ; SSE41-NEXT: pmullw %xmm9, %xmm0 ; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255] ; SSE41-NEXT: pand %xmm9, %xmm0 diff --git a/llvm/test/CodeGen/X86/powi.ll b/llvm/test/CodeGen/X86/powi.ll index 3fc2afe7ad08..fb7f570d6251 100644 --- a/llvm/test/CodeGen/X86/powi.ll +++ b/llvm/test/CodeGen/X86/powi.ll @@ -5,7 +5,7 @@ define double @pow_wrapper(double %a) nounwind readonly ssp noredzone { ; CHECK-LABEL: pow_wrapper: ; CHECK: # BB#0: ; CHECK-NEXT: movapd %xmm0, %xmm1 -; CHECK-NEXT: mulsd %xmm0, %xmm1 +; CHECK-NEXT: mulsd %xmm1, %xmm1 ; CHECK-NEXT: mulsd %xmm1, %xmm0 ; CHECK-NEXT: mulsd %xmm1, %xmm1 ; CHECK-NEXT: mulsd %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/pr11334.ll b/llvm/test/CodeGen/X86/pr11334.ll index bb39606a81a3..8a154653414a 100644 --- a/llvm/test/CodeGen/X86/pr11334.ll +++ b/llvm/test/CodeGen/X86/pr11334.ll @@ -25,7 +25,7 @@ define <3 x double> @v3f2d_ext_vec(<3 x float> %v1) nounwind { ; SSE-NEXT: cvtps2pd %xmm0, %xmm0 ; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movaps %xmm2, %xmm1 -; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1] +; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] ; SSE-NEXT: fldl -{{[0-9]+}}(%rsp) ; SSE-NEXT: movaps %xmm2, %xmm0 ; SSE-NEXT: retq diff --git a/llvm/test/CodeGen/X86/pr29112.ll b/llvm/test/CodeGen/X86/pr29112.ll index a3cff8a7e2bc..8c970b3d4771 100644 --- a/llvm/test/CodeGen/X86/pr29112.ll +++ b/llvm/test/CodeGen/X86/pr29112.ll @@ -49,16 +49,16 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, < ; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3],xmm9[3] ; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0] ; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2 -; CHECK-NEXT: vmovaps %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill -; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm9 +; CHECK-NEXT: vmovaps %xmm15, %xmm1 +; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill +; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9 ; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0 -; CHECK-NEXT: vaddps %xmm15, %xmm15, %xmm8 +; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8 ; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3 ; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 -; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm0 +; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp) ; CHECK-NEXT: vmovaps %xmm9, (%rsp) -; CHECK-NEXT: vmovaps %xmm15, %xmm1 ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: callq foo diff --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll index 911e46108f0e..a1f1e084d330 100644 --- a/llvm/test/CodeGen/X86/psubus.ll +++ b/llvm/test/CodeGen/X86/psubus.ll @@ -638,7 +638,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind { ; SSE41-LABEL: test14: ; SSE41: ## BB#0: ## %vector.ph ; SSE41-NEXT: movdqa %xmm0, %xmm5 -; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3] ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero ; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,0,1] diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll index 15da6d651716..b37644e22917 100644 --- a/llvm/test/CodeGen/X86/select.ll +++ b/llvm/test/CodeGen/X86/select.ll @@ -23,7 +23,8 @@ define i32 
@test1(%0* %p, %0* %q, i1 %r) nounwind { ; MCU-NEXT: jne .LBB0_1 ; MCU-NEXT: # BB#2: ; MCU-NEXT: addl $8, %edx -; MCU-NEXT: movl (%edx), %eax +; MCU-NEXT: movl %edx, %eax +; MCU-NEXT: movl (%eax), %eax ; MCU-NEXT: retl ; MCU-NEXT: .LBB0_1: ; MCU-NEXT: addl $8, %eax diff --git a/llvm/test/CodeGen/X86/shrink-wrap-chkstk.ll b/llvm/test/CodeGen/X86/shrink-wrap-chkstk.ll index 1364732813ad..099ef137d8d9 100644 --- a/llvm/test/CodeGen/X86/shrink-wrap-chkstk.ll +++ b/llvm/test/CodeGen/X86/shrink-wrap-chkstk.ll @@ -61,7 +61,7 @@ false: ; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue ; CHECK: movl %ecx, %eax -; CHECK: cmpl %edx, %ecx +; CHECK: cmpl %edx, %eax ; CHECK: jge LBB1_2 ; CHECK: pushl %eax ; CHECK: movl $4092, %eax diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath.ll b/llvm/test/CodeGen/X86/sqrt-fastmath.ll index 54e8f067f360..af2dcc495f53 100644 --- a/llvm/test/CodeGen/X86/sqrt-fastmath.ll +++ b/llvm/test/CodeGen/X86/sqrt-fastmath.ll @@ -132,7 +132,7 @@ define float @f32_estimate(float %x) #1 { ; SSE: # BB#0: ; SSE-NEXT: rsqrtss %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm1, %xmm2 -; SSE-NEXT: mulss %xmm1, %xmm2 +; SSE-NEXT: mulss %xmm2, %xmm2 ; SSE-NEXT: mulss %xmm0, %xmm2 ; SSE-NEXT: addss {{.*}}(%rip), %xmm2 ; SSE-NEXT: mulss {{.*}}(%rip), %xmm1 @@ -178,7 +178,7 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 { ; SSE: # BB#0: ; SSE-NEXT: rsqrtps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm1, %xmm2 -; SSE-NEXT: mulps %xmm1, %xmm2 +; SSE-NEXT: mulps %xmm2, %xmm2 ; SSE-NEXT: mulps %xmm0, %xmm2 ; SSE-NEXT: addps {{.*}}(%rip), %xmm2 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm1 @@ -228,7 +228,7 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 { ; SSE-NEXT: rsqrtps %xmm0, %xmm3 ; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01] ; SSE-NEXT: movaps %xmm3, %xmm2 -; SSE-NEXT: mulps %xmm3, %xmm2 +; SSE-NEXT: mulps %xmm2, %xmm2 ; SSE-NEXT: mulps %xmm0, %xmm2 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00] ; SSE-NEXT: addps %xmm0, %xmm2 @@ -236,7 +236,7 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 { ; SSE-NEXT: mulps %xmm3, %xmm2 ; SSE-NEXT: rsqrtps %xmm1, %xmm5 ; SSE-NEXT: movaps %xmm5, %xmm3 -; SSE-NEXT: mulps %xmm5, %xmm3 +; SSE-NEXT: mulps %xmm3, %xmm3 ; SSE-NEXT: mulps %xmm1, %xmm3 ; SSE-NEXT: addps %xmm0, %xmm3 ; SSE-NEXT: mulps %xmm4, %xmm3 diff --git a/llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll b/llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll index b82dc3d2b8d0..ebc29b1393b0 100644 --- a/llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll +++ b/llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll @@ -1084,7 +1084,8 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, ; SSE2-NEXT: testb $1, %dil ; SSE2-NEXT: jne .LBB62_1 ; SSE2-NEXT: # BB#2: -; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3] +; SSE2-NEXT: movaps %xmm2, %xmm1 +; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] ; SSE2-NEXT: retq ; SSE2-NEXT: .LBB62_1: ; SSE2-NEXT: addss %xmm0, %xmm1 @@ -1096,7 +1097,8 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, ; SSE41-NEXT: testb $1, %dil ; SSE41-NEXT: jne .LBB62_1 ; SSE41-NEXT: # BB#2: -; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3] +; SSE41-NEXT: movaps %xmm2, %xmm1 +; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] ; SSE41-NEXT: retq ; SSE41-NEXT: .LBB62_1: ; SSE41-NEXT: addss %xmm0, %xmm1 @@ -1137,7 +1139,8 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> ; 
SSE2-NEXT: testb $1, %dil ; SSE2-NEXT: jne .LBB63_1 ; SSE2-NEXT: # BB#2: -; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1] +; SSE2-NEXT: movapd %xmm2, %xmm1 +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq ; SSE2-NEXT: .LBB63_1: ; SSE2-NEXT: addsd %xmm0, %xmm1 @@ -1149,7 +1152,8 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> ; SSE41-NEXT: testb $1, %dil ; SSE41-NEXT: jne .LBB63_1 ; SSE41-NEXT: # BB#2: -; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1] +; SSE41-NEXT: movapd %xmm2, %xmm1 +; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE41-NEXT: retq ; SSE41-NEXT: .LBB63_1: ; SSE41-NEXT: addsd %xmm0, %xmm1 diff --git a/llvm/test/CodeGen/X86/sse1.ll b/llvm/test/CodeGen/X86/sse1.ll index 5139275c1a6c..c74dec3e21b6 100644 --- a/llvm/test/CodeGen/X86/sse1.ll +++ b/llvm/test/CodeGen/X86/sse1.ll @@ -16,7 +16,7 @@ define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind { ; X32-LABEL: test4: ; X32: # BB#0: # %entry ; X32-NEXT: movaps %xmm0, %xmm2 -; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3] +; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3] ; X32-NEXT: addss %xmm1, %xmm0 ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] ; X32-NEXT: subss %xmm1, %xmm2 @@ -26,7 +26,7 @@ define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind { ; X64-LABEL: test4: ; X64: # BB#0: # %entry ; X64-NEXT: movaps %xmm0, %xmm2 -; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3] +; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3] ; X64-NEXT: addss %xmm1, %xmm0 ; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] ; X64-NEXT: subss %xmm1, %xmm2 diff --git a/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll b/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll index dc54fa210375..b5aa26f532ef 100644 --- a/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll +++ b/llvm/test/CodeGen/X86/sse3-avx-addsub-2.ll @@ -406,9 +406,9 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) { ; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: subss %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1] +; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1] ; SSE-NEXT: movaps %xmm1, %xmm4 -; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1] +; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1] ; SSE-NEXT: subss %xmm4, %xmm3 ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] ; SSE-NEXT: addss %xmm0, %xmm4 diff --git a/llvm/test/CodeGen/X86/statepoint-live-in.ll b/llvm/test/CodeGen/X86/statepoint-live-in.ll index 5fb167225865..0179d37ad4e1 100644 --- a/llvm/test/CodeGen/X86/statepoint-live-in.ll +++ b/llvm/test/CodeGen/X86/statepoint-live-in.ll @@ -126,7 +126,7 @@ define void @test6(i32 %a) gc "statepoint-example" { ; CHECK-NEXT: Lcfi11: ; CHECK-NEXT: .cfi_offset %rbx, -16 ; CHECK-NEXT: movl %edi, %ebx -; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp) +; CHECK-NEXT: movl %ebx, {{[0-9]+}}(%rsp) ; CHECK-NEXT: callq _baz ; CHECK-NEXT: Ltmp6: ; CHECK-NEXT: callq _bar @@ -153,13 +153,13 @@ entry: ; CHECK: .byte 1 ; CHECK-NEXT: .byte 0 ; CHECK-NEXT: .short 4 -; CHECK-NEXT: .short 5 +; CHECK-NEXT: .short 6 ; CHECK-NEXT: .short 0 ; CHECK-NEXT: .long 0 ; CHECK: .byte 1 ; CHECK-NEXT: .byte 0 ; CHECK-NEXT: .short 4 -; CHECK-NEXT: .short 4 +; CHECK-NEXT: .short 3 ; CHECK-NEXT: .short 0 ; CHECK-NEXT: .long 0 ; CHECK: Ltmp2-_test2 diff --git a/llvm/test/CodeGen/X86/statepoint-stack-usage.ll b/llvm/test/CodeGen/X86/statepoint-stack-usage.ll index 73b0d6a18071..6e7fc7bf1c07 100644 --- a/llvm/test/CodeGen/X86/statepoint-stack-usage.ll +++ 
b/llvm/test/CodeGen/X86/statepoint-stack-usage.ll @@ -61,9 +61,9 @@ define i32 @back_to_back_deopt(i32 %a, i32 %b, i32 %c) #1 gc "statepoint-example" { ; CHECK-LABEL: back_to_back_deopt ; The exact stores don't matter, but there need to be three stack slots created -; CHECK-DAG: movl %edi, 12(%rsp) -; CHECK-DAG: movl %esi, 8(%rsp) -; CHECK-DAG: movl %edx, 4(%rsp) +; CHECK-DAG: movl %ebx, 12(%rsp) +; CHECK-DAG: movl %ebp, 8(%rsp) +; CHECK-DAG: movl %r14d, 4(%rsp) ; CHECK: callq ; CHECK-DAG: movl %ebx, 12(%rsp) ; CHECK-DAG: movl %ebp, 8(%rsp) diff --git a/llvm/test/CodeGen/X86/vec_fp_to_int.ll b/llvm/test/CodeGen/X86/vec_fp_to_int.ll index a092dd100e99..6cfe41ac503d 100644 --- a/llvm/test/CodeGen/X86/vec_fp_to_int.ll +++ b/llvm/test/CodeGen/X86/vec_fp_to_int.ll @@ -1018,12 +1018,12 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) { ; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: movq %rax, %xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] @@ -1126,12 +1126,12 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) { ; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: movq %rax, %xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] @@ -1316,11 +1316,11 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) { ; SSE-LABEL: fptoui_4f32_to_4i32: ; SSE: # BB#0: ; SSE-NEXT: movaps %xmm0, %xmm1 -; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3] ; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: movd %eax, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] +; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] ; SSE-NEXT: cvttss2si %xmm2, %rax ; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] @@ -1560,7 +1560,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) { ; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: movd %eax, %xmm0 ; SSE-NEXT: movaps %xmm2, %xmm3 -; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm2[1],xmm3[1] +; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1] ; SSE-NEXT: cvttss2si %xmm3, %rax ; SSE-NEXT: movd %eax, %xmm3 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] @@ -1572,11 +1572,11 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) { ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; SSE-NEXT: movaps %xmm1, %xmm2 -; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3] ; SSE-NEXT: cvttss2si %xmm2, %rax ; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: movaps %xmm1, %xmm3 -; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1] 
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1] ; SSE-NEXT: cvttss2si %xmm3, %rax ; SSE-NEXT: movd %eax, %xmm3 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] @@ -1687,7 +1687,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) { ; SSE-NEXT: cmovaeq %rcx, %rdx ; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: cvttss2si %xmm4, %rcx @@ -1698,7 +1698,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) { ; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: cvttss2si %xmm4, %rcx @@ -1865,7 +1865,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) { ; SSE-NEXT: cmovaeq %rcx, %rdx ; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: cvttss2si %xmm4, %rcx @@ -1876,7 +1876,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) { ; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3] +; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] ; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: cvttss2si %xmm4, %rcx diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll index 075a0d7b2c25..7cb1c95cb01a 100644 --- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll +++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll @@ -1611,7 +1611,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) { ; SSE-LABEL: uitofp_2i64_to_4f32: ; SSE: # BB#0: ; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: movq %xmm0, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB39_1 ; SSE-NEXT: # BB#2: @@ -1839,7 +1839,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) { ; SSE-LABEL: uitofp_4i64_to_4f32_undef: ; SSE: # BB#0: ; SSE-NEXT: movdqa %xmm0, %xmm1 -; SSE-NEXT: movq %xmm0, %rax +; SSE-NEXT: movq %xmm1, %rax ; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: js .LBB41_1 ; SSE-NEXT: # BB#2: diff --git a/llvm/test/CodeGen/X86/vec_minmax_sint.ll b/llvm/test/CodeGen/X86/vec_minmax_sint.ll index 41167eedaa7e..5999116deb9c 100644 --- a/llvm/test/CodeGen/X86/vec_minmax_sint.ll +++ b/llvm/test/CodeGen/X86/vec_minmax_sint.ll @@ -437,7 +437,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) { ; SSE42: # BB#0: ; SSE42-NEXT: movdqa %xmm0, %xmm2 ; SSE42-NEXT: movdqa %xmm1, %xmm3 -; SSE42-NEXT: pcmpgtq %xmm0, %xmm3 +; SSE42-NEXT: pcmpgtq %xmm2, %xmm3 ; SSE42-NEXT: pcmpeqd %xmm0, %xmm0 ; SSE42-NEXT: pxor %xmm3, %xmm0 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1 diff --git a/llvm/test/CodeGen/X86/vec_shift4.ll b/llvm/test/CodeGen/X86/vec_shift4.ll index 7d5969fb719e..bef2438aecd1 100644 --- a/llvm/test/CodeGen/X86/vec_shift4.ll +++ b/llvm/test/CodeGen/X86/vec_shift4.ll @@ -35,7 +35,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { ; X32: # BB#0: # %entry ; X32-NEXT: movdqa %xmm0, %xmm2 ; X32-NEXT: psllw $5, %xmm1 -; X32-NEXT: movdqa %xmm0, %xmm3 +; X32-NEXT: 
movdqa %xmm2, %xmm3 ; X32-NEXT: psllw $4, %xmm3 ; X32-NEXT: pand {{\.LCPI.*}}, %xmm3 ; X32-NEXT: movdqa %xmm1, %xmm0 @@ -47,7 +47,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { ; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; X32-NEXT: movdqa %xmm2, %xmm3 -; X32-NEXT: paddb %xmm2, %xmm3 +; X32-NEXT: paddb %xmm3, %xmm3 ; X32-NEXT: paddb %xmm1, %xmm1 ; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2 @@ -58,7 +58,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { ; X64: # BB#0: # %entry ; X64-NEXT: movdqa %xmm0, %xmm2 ; X64-NEXT: psllw $5, %xmm1 -; X64-NEXT: movdqa %xmm0, %xmm3 +; X64-NEXT: movdqa %xmm2, %xmm3 ; X64-NEXT: psllw $4, %xmm3 ; X64-NEXT: pand {{.*}}(%rip), %xmm3 ; X64-NEXT: movdqa %xmm1, %xmm0 @@ -70,7 +70,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { ; X64-NEXT: movdqa %xmm1, %xmm0 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; X64-NEXT: movdqa %xmm2, %xmm3 -; X64-NEXT: paddb %xmm2, %xmm3 +; X64-NEXT: paddb %xmm3, %xmm3 ; X64-NEXT: paddb %xmm1, %xmm1 ; X64-NEXT: movdqa %xmm1, %xmm0 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2 diff --git a/llvm/test/CodeGen/X86/vector-blend.ll b/llvm/test/CodeGen/X86/vector-blend.ll index 1ec5cc7570ab..ab5fac59ebd1 100644 --- a/llvm/test/CodeGen/X86/vector-blend.ll +++ b/llvm/test/CodeGen/X86/vector-blend.ll @@ -992,7 +992,7 @@ define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) { ; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: psrad $31, %xmm1 ; SSE41-NEXT: pxor %xmm3, %xmm3 -; SSE41-NEXT: psubd %xmm0, %xmm3 +; SSE41-NEXT: psubd %xmm2, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3 ; SSE41-NEXT: movaps %xmm3, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll index 5bddfe135d4d..87cf2026d1ef 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll @@ -176,13 +176,13 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind { ; SSE2-LABEL: test_div7_16i8: ; SSE2: # BB#0: ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427] ; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: psrlw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: pmullw %xmm3, %xmm1 ; SSE2-NEXT: psrlw $8, %xmm1 @@ -482,13 +482,13 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE2-LABEL: test_rem7_16i8: ; SSE2: # BB#0: ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427] ; 
SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: psrlw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: pmullw %xmm3, %xmm1 ; SSE2-NEXT: psrlw $8, %xmm1 @@ -504,7 +504,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: paddb %xmm2, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] ; SSE2-NEXT: pmullw %xmm3, %xmm2 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll index 5a31bdd39652..8138442b3eaf 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -481,7 +481,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] ; SSE2-NEXT: pmullw %xmm3, %xmm2 diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll index 8cd8d810eaa8..5fafecae23d7 100644 --- a/llvm/test/CodeGen/X86/vector-rotate-128.ll +++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll @@ -361,7 +361,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind { ; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm4 -; SSE41-NEXT: paddw %xmm1, %xmm4 +; SSE41-NEXT: paddw %xmm4, %xmm4 ; SSE41-NEXT: movdqa %xmm3, %xmm6 ; SSE41-NEXT: psllw $8, %xmm6 ; SSE41-NEXT: movdqa %xmm3, %xmm5 @@ -386,7 +386,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind { ; SSE41-NEXT: psllw $4, %xmm2 ; SSE41-NEXT: por %xmm0, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm1 -; SSE41-NEXT: paddw %xmm2, %xmm1 +; SSE41-NEXT: paddw %xmm1, %xmm1 ; SSE41-NEXT: movdqa %xmm3, %xmm4 ; SSE41-NEXT: psrlw $8, %xmm4 ; SSE41-NEXT: movdqa %xmm2, %xmm0 @@ -631,10 +631,10 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8] ; SSE41-NEXT: psubb %xmm3, %xmm2 ; SSE41-NEXT: psllw $5, %xmm3 -; SSE41-NEXT: movdqa %xmm0, %xmm5 +; SSE41-NEXT: movdqa %xmm1, %xmm5 ; SSE41-NEXT: psllw $4, %xmm5 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5 -; SSE41-NEXT: movdqa %xmm0, %xmm4 +; SSE41-NEXT: movdqa %xmm1, %xmm4 ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4 ; SSE41-NEXT: movdqa %xmm4, %xmm5 @@ -644,13 +644,13 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4 ; SSE41-NEXT: movdqa %xmm4, %xmm5 -; SSE41-NEXT: paddb %xmm4, %xmm5 +; 
SSE41-NEXT: paddb %xmm5, %xmm5 ; SSE41-NEXT: paddb %xmm3, %xmm3 ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4 ; SSE41-NEXT: psllw $5, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: paddb %xmm2, %xmm3 +; SSE41-NEXT: paddb %xmm3, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm5 ; SSE41-NEXT: psrlw $4, %xmm5 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5 @@ -1191,7 +1191,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind { ; SSE41-LABEL: constant_rotate_v16i8: ; SSE41: # BB#0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: movdqa %xmm1, %xmm3 ; SSE41-NEXT: psllw $4, %xmm3 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256] @@ -1203,7 +1203,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind { ; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: paddb %xmm2, %xmm3 +; SSE41-NEXT: paddb %xmm3, %xmm3 ; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: movdqa %xmm1, %xmm3 diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll index 74003cd9919e..392c0de95f24 100644 --- a/llvm/test/CodeGen/X86/vector-sext.ll +++ b/llvm/test/CodeGen/X86/vector-sext.ll @@ -243,7 +243,7 @@ define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp ; SSSE3-LABEL: sext_16i8_to_8i32: ; SSSE3: # BB#0: # %entry ; SSSE3-NEXT: movdqa %xmm0, %xmm1 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: psrad $24, %xmm0 ; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,4,u,u,u,5,u,u,u,6,u,u,u,7] @@ -312,7 +312,7 @@ define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss ; SSSE3-LABEL: sext_16i8_to_16i32: ; SSSE3: # BB#0: # %entry ; SSSE3-NEXT: movdqa %xmm0, %xmm3 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: psrad $24, %xmm0 ; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15] @@ -443,7 +443,7 @@ define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp ; SSSE3-LABEL: sext_16i8_to_4i64: ; SSSE3: # BB#0: # %entry ; SSSE3-NEXT: movdqa %xmm0, %xmm1 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: movdqa %xmm0, %xmm2 ; SSSE3-NEXT: psrad $31, %xmm2 @@ -499,7 +499,7 @@ define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp ; SSE2-LABEL: sext_16i8_to_8i64: ; SSE2: # BB#0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpcklbw 
{{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: psrad $31, %xmm2 @@ -1112,7 +1112,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp ; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: psrad $31, %xmm3 -; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: psrad $31, %xmm4 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] @@ -1131,7 +1131,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp ; SSSE3-NEXT: movdqa %xmm1, %xmm2 ; SSSE3-NEXT: movdqa %xmm0, %xmm3 ; SSSE3-NEXT: psrad $31, %xmm3 -; SSSE3-NEXT: movdqa %xmm1, %xmm4 +; SSSE3-NEXT: movdqa %xmm2, %xmm4 ; SSSE3-NEXT: psrad $31, %xmm4 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] @@ -2228,7 +2228,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) { ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSE2-NEXT: pslld $31, %xmm0 ; SSE2-NEXT: psrad $31, %xmm0 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] @@ -2277,7 +2277,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) { ; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: pslld $31, %xmm0 ; SSSE3-NEXT: psrad $31, %xmm0 ; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] @@ -3079,7 +3079,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) { ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: psllw $15, %xmm0 ; SSE2-NEXT: psraw $15, %xmm0 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] @@ -3168,7 +3168,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) { ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSSE3-NEXT: movdqa %xmm1, %xmm0 -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: psllw $15, %xmm0 ; SSSE3-NEXT: psraw $15, %xmm0 ; 
SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll index 58ec6aff86f8..a5e2cb66eba8 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll @@ -274,7 +274,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind { ; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddw %xmm1, %xmm3 +; SSE41-NEXT: paddw %xmm3, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: psraw $8, %xmm4 ; SSE41-NEXT: movdqa %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll index 0aab957422ed..9b44ad1dac30 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll @@ -245,7 +245,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind { ; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddw %xmm1, %xmm3 +; SSE41-NEXT: paddw %xmm3, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: psrlw $8, %xmm4 ; SSE41-NEXT: movdqa %xmm1, %xmm0 @@ -407,7 +407,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; SSE41: # BB#0: ; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: psllw $5, %xmm1 -; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm3 ; SSE41-NEXT: psrlw $4, %xmm3 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm0 @@ -679,7 +679,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; SSE41-NEXT: pshufb %xmm0, %xmm1 ; SSE41-NEXT: psllw $5, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddb %xmm1, %xmm3 +; SSE41-NEXT: paddb %xmm3, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: psrlw $4, %xmm4 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4 @@ -1101,7 +1101,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind { ; SSE41-LABEL: constant_shift_v16i8: ; SSE41: # BB#0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm2 ; SSE41-NEXT: psrlw $4, %xmm2 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32] diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll index 304af4e38d66..568bf6e974f7 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll @@ -202,7 +202,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind { ; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddw %xmm1, %xmm3 +; SSE41-NEXT: paddw %xmm3, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: psllw $8, %xmm4 ; SSE41-NEXT: movdqa %xmm1, %xmm0 @@ -361,7 +361,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; SSE41: # BB#0: ; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: psllw $5, %xmm1 -; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm3 ; SSE41-NEXT: psllw $4, %xmm3 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm0 @@ -373,7 +373,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; 
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm2, %xmm3
+; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
@@ -627,7 +627,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm3
+; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psllw $4, %xmm4
; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
@@ -639,7 +639,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm2, %xmm1
+; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
@@ -957,7 +957,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
@@ -968,7 +968,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: paddb %xmm1, %xmm2
+; SSE41-NEXT: paddb %xmm2, %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index 693594947520..e04c5321fa25 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -2792,7 +2792,7 @@ define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: PR22377:
; SSE: # BB#0: # %entry
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
; SSE-NEXT: addps %xmm0, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll
index 18da742f119e..d4fbb72bbe6d 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll
@@ -5198,7 +5198,7 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
; SSE-LABEL: mul_add_const_v4i64_v4i32:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
diff --git a/llvm/test/CodeGen/X86/vector-zext.ll b/llvm/test/CodeGen/X86/vector-zext.ll
index fb4581487a35..fe3523de3575 100644
--- a/llvm/test/CodeGen/X86/vector-zext.ll
+++ b/llvm/test/CodeGen/X86/vector-zext.ll
@@ -246,7 +246,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@@ -261,7 +261,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@@ -399,7 +399,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm0
@@ -700,7 +700,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
@@ -715,7 +715,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
@@ -1582,7 +1582,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE41-NEXT: retq
;
@@ -1630,7 +1630,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE41-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vselect-minmax.ll b/llvm/test/CodeGen/X86/vselect-minmax.ll
index c6a8675c3e7f..5524eaf397c9 100644
--- a/llvm/test/CodeGen/X86/vselect-minmax.ll
+++ b/llvm/test/CodeGen/X86/vselect-minmax.ll
@@ -3344,12 +3344,12 @@ define <64 x i8> @test98(<64 x i8> %a, <64 x i8> %b) {
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: movdqa %xmm8, %xmm12
; SSE2-NEXT: pcmpgtb %xmm7, %xmm12
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm9, %xmm14
; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -3487,12 +3487,12 @@ define <64 x i8> @test100(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm12
+; SSE2-NEXT: pcmpgtb %xmm8, %xmm12
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm9, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -4225,12 +4225,12 @@ define <16 x i32> @test114(<16 x i32> %a, <16 x i32> %b) {
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: movdqa %xmm8, %xmm12
; SSE2-NEXT: pcmpgtd %xmm7, %xmm12
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm9, %xmm14
; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -4368,12 +4368,12 @@ define <16 x i32> @test116(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm12
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -4890,7 +4890,7 @@ define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test122:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5164,7 +5164,7 @@ define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test124:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5467,7 +5467,7 @@ define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test126:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5795,7 +5795,7 @@ define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test128:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -6047,7 +6047,7 @@ define <64 x i8> @test130(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm8, %xmm14
; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -6190,7 +6190,7 @@ define <64 x i8> @test132(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm8, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -6941,7 +6941,7 @@ define <16 x i32> @test146(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm8, %xmm14
; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -7084,7 +7084,7 @@ define <16 x i32> @test148(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -7610,7 +7610,7 @@ define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test154:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -7882,7 +7882,7 @@ define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test156:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -8183,7 +8183,7 @@ define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test158:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -8509,7 +8509,7 @@ define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test160:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -10289,7 +10289,7 @@ define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -10768,7 +10768,7 @@ define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
diff --git a/llvm/test/CodeGen/X86/widen_conv-3.ll b/llvm/test/CodeGen/X86/widen_conv-3.ll
index 0124806a29aa..3b20f3515716 100644
--- a/llvm/test/CodeGen/X86/widen_conv-3.ll
+++ b/llvm/test/CodeGen/X86/widen_conv-3.ll
@@ -74,7 +74,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE2-NEXT: movss %xmm0, (%eax)
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm0, 4(%eax)
diff --git a/llvm/test/CodeGen/X86/widen_conv-4.ll b/llvm/test/CodeGen/X86/widen_conv-4.ll
index 178096cabac8..6dc938893d38 100644
--- a/llvm/test/CodeGen/X86/widen_conv-4.ll
+++ b/llvm/test/CodeGen/X86/widen_conv-4.ll
@@ -19,7 +19,7 @@ define void @convert_v7i16_v7f32(<7 x float>* %dst.addr, <7 x i16> %src) nounwin
; X86-SSE2-NEXT: movups %xmm0, (%eax)
; X86-SSE2-NEXT: movss %xmm2, 16(%eax)
; X86-SSE2-NEXT: movaps %xmm2, %xmm0
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm2[1],xmm0[1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X86-SSE2-NEXT: movss %xmm0, 24(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm2, 20(%eax)
@@ -100,7 +100,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE2-NEXT: movss %xmm0, (%eax)
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm0, 4(%eax)
diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
index 579f847914a4..2899e38b71cd 100644
--- a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
+++ b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
@@ -23,7 +23,7 @@ target triple = "x86_64-apple-macosx"
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.
@@ -69,7 +69,7 @@ attributes #0 = { "no-frame-pointer-elim"="false" }
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
@@ -115,7 +115,7 @@ attributes #1 = { "no-frame-pointer-elim"="true" }
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
index d4b49437508a..519f0d0924e3 100644
--- a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
@@ -17,7 +17,7 @@ target triple = "x86_64-apple-macosx"
; Compare the arguments and jump to exit.
; No prologue needed.
; ENABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; ENABLE-NEXT: cmpl %esi, %edi
+; ENABLE-NEXT: cmpl %esi, [[ARG0CPY]]
; ENABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
@@ -27,7 +27,7 @@ target triple = "x86_64-apple-macosx"
; Compare the arguments and jump to exit.
; After the prologue is set.
; DISABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; DISABLE-NEXT: cmpl %esi, %edi
+; DISABLE-NEXT: cmpl %esi, [[ARG0CPY]]
; DISABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.