Revert "[MachineCopyPropagation] Extend pass to do COPY source forwarding"

This reverts commit r311038.

Several buildbots are breaking, and at least one appears to be due to
the forwarding of physical regs enabled by this change.  Reverting while
I investigate further.

llvm-svn: 311062
Geoff Berry 2017-08-17 04:04:11 +00:00
parent dd8c16b58e
commit 4e38e02e6f
89 changed files with 495 additions and 1004 deletions

View File

@@ -278,11 +278,6 @@ namespace llvm {
   /// MachineSinking - This pass performs sinking on machine instructions.
   extern char &MachineSinkingID;
 
-  /// MachineCopyPropagationPreRegRewrite - This pass performs copy propagation
-  /// on machine instructions after register allocation but before virtual
-  /// register re-writing..
-  extern char &MachineCopyPropagationPreRegRewriteID;
-
   /// MachineCopyPropagation - This pass performs copy propagation on
   /// machine instructions.
   extern char &MachineCopyPropagationID;

View File

@@ -233,7 +233,6 @@ void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
 void initializeMachineCSEPass(PassRegistry&);
 void initializeMachineCombinerPass(PassRegistry&);
 void initializeMachineCopyPropagationPass(PassRegistry&);
-void initializeMachineCopyPropagationPreRegRewritePass(PassRegistry&);
 void initializeMachineDominanceFrontierPass(PassRegistry&);
 void initializeMachineDominatorTreePass(PassRegistry&);
 void initializeMachineFunctionPrinterPassPass(PassRegistry&);

View File

@@ -54,7 +54,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializeMachineCSEPass(Registry);
   initializeMachineCombinerPass(Registry);
   initializeMachineCopyPropagationPass(Registry);
-  initializeMachineCopyPropagationPreRegRewritePass(Registry);
   initializeMachineDominatorTreePass(Registry);
   initializeMachineFunctionPrinterPassPass(Registry);
   initializeMachineLICMPass(Registry);

View File

@@ -7,62 +7,18 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This is a simple MachineInstr-level copy forwarding pass. It may be run at
-// two places in the codegen pipeline:
-//   - After register allocation but before virtual registers have been remapped
-//     to physical registers.
-//   - After physical register remapping.
-//
-// The optimizations done vary slightly based on whether virtual registers are
-// still present. In both cases, this pass forwards the source of COPYs to the
-// users of their destinations when doing so is legal. For example:
-//
-//   %vreg1 = COPY %vreg0
-//   ...
-//   ... = OP %vreg1
-//
-// If
-//   - the physical register assigned to %vreg0 has not been clobbered by the
-//     time of the use of %vreg1
-//   - the register class constraints are satisfied
-//   - the COPY def is the only value that reaches OP
-// then this pass replaces the above with:
-//
-//   %vreg1 = COPY %vreg0
-//   ...
-//   ... = OP %vreg0
-//
-// and updates the relevant state required by VirtRegMap (e.g. LiveIntervals).
-// COPYs whose LiveIntervals become dead as a result of this forwarding (i.e. if
-// all uses of %vreg1 are changed to %vreg0) are removed.
-//
-// When being run with only physical registers, this pass will also remove some
-// redundant COPYs. For example:
-//
-//    %R1 = COPY %R0
-//    ...   // No clobber of %R1
-//    %R0 = COPY %R1 <<< Removed
-//
-// or
-//
-//    %R1 = COPY %R0
-//    ...   // No clobber of %R0
-//    %R1 = COPY %R0 <<< Removed
+// This is an extremely simple MachineInstr-level copy propagation pass.
 //
 //===----------------------------------------------------------------------===//
 
-#include "LiveDebugVariables.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/LiveRangeEdit.h"
-#include "llvm/CodeGen/LiveStackAnalysis.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/VirtRegMap.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
@@ -74,48 +30,24 @@ using namespace llvm;
 
 #define DEBUG_TYPE "machine-cp"
 
 STATISTIC(NumDeletes, "Number of dead copies deleted");
-STATISTIC(NumCopyForwards, "Number of copy uses forwarded");
 
 namespace {
   typedef SmallVector<unsigned, 4> RegList;
   typedef DenseMap<unsigned, RegList> SourceMap;
   typedef DenseMap<unsigned, MachineInstr*> Reg2MIMap;
 
-  class MachineCopyPropagation : public MachineFunctionPass,
-                                 private LiveRangeEdit::Delegate {
+  class MachineCopyPropagation : public MachineFunctionPass {
     const TargetRegisterInfo *TRI;
     const TargetInstrInfo *TII;
-    MachineRegisterInfo *MRI;
-    MachineFunction *MF;
-    SlotIndexes *Indexes;
-    LiveIntervals *LIS;
-    const VirtRegMap *VRM;
-    // True if this pass being run before virtual registers are remapped to
-    // physical ones.
-    bool PreRegRewrite;
-    bool NoSubRegLiveness;
-
-  protected:
-    MachineCopyPropagation(char &ID, bool PreRegRewrite)
-        : MachineFunctionPass(ID), PreRegRewrite(PreRegRewrite) {}
+    const MachineRegisterInfo *MRI;
 
   public:
     static char ID; // Pass identification, replacement for typeid
-    MachineCopyPropagation() : MachineCopyPropagation(ID, false) {
+    MachineCopyPropagation() : MachineFunctionPass(ID) {
       initializeMachineCopyPropagationPass(*PassRegistry::getPassRegistry());
     }
 
     void getAnalysisUsage(AnalysisUsage &AU) const override {
-      if (PreRegRewrite) {
-        AU.addRequired<SlotIndexes>();
-        AU.addPreserved<SlotIndexes>();
-        AU.addRequired<LiveIntervals>();
-        AU.addPreserved<LiveIntervals>();
-        AU.addRequired<VirtRegMap>();
-        AU.addPreserved<VirtRegMap>();
-        AU.addPreserved<LiveDebugVariables>();
-        AU.addPreserved<LiveStacks>();
-      }
       AU.setPreservesCFG();
       MachineFunctionPass::getAnalysisUsage(AU);
     }
@@ -123,10 +55,6 @@ namespace {
     bool runOnMachineFunction(MachineFunction &MF) override;
 
     MachineFunctionProperties getRequiredProperties() const override {
-      if (PreRegRewrite)
-        return MachineFunctionProperties()
-            .set(MachineFunctionProperties::Property::NoPHIs)
-            .set(MachineFunctionProperties::Property::TracksLiveness);
       return MachineFunctionProperties().set(
           MachineFunctionProperties::Property::NoVRegs);
     }
@@ -136,28 +64,6 @@ namespace {
    void ReadRegister(unsigned Reg);
    void CopyPropagateBlock(MachineBasicBlock &MBB);
    bool eraseIfRedundant(MachineInstr &Copy, unsigned Src, unsigned Def);
-    unsigned getPhysReg(unsigned Reg, unsigned SubReg);
-    unsigned getPhysReg(const MachineOperand &Opnd) {
-      return getPhysReg(Opnd.getReg(), Opnd.getSubReg());
-    }
-    unsigned getFullPhysReg(const MachineOperand &Opnd) {
-      return getPhysReg(Opnd.getReg(), 0);
-    }
-    void forwardUses(MachineInstr &MI);
-    bool isForwardableRegClassCopy(const MachineInstr &Copy,
-                                   const MachineInstr &UseI);
-    std::tuple<unsigned, unsigned, bool>
-    checkUseSubReg(const MachineOperand &CopySrc, const MachineOperand &MOUse);
-    bool hasImplicitOverlap(const MachineInstr &MI, const MachineOperand &Use);
-    void narrowRegClass(const MachineInstr &MI, const MachineOperand &MOUse,
-                        unsigned NewUseReg, unsigned NewUseSubReg);
-    void updateForwardedCopyLiveInterval(const MachineInstr &Copy,
-                                         const MachineInstr &UseMI,
-                                         unsigned OrigUseReg,
-                                         unsigned NewUseReg,
-                                         unsigned NewUseSubReg);
-
-    /// LiveRangeEdit callback for eliminateDeadDefs().
-    void LRE_WillEraseInstruction(MachineInstr *MI) override;
 
    /// Candidates for deletion.
    SmallSetVector<MachineInstr*, 8> MaybeDeadCopies;
@@ -169,15 +75,6 @@ namespace {
    SourceMap SrcMap;
    bool Changed;
  };
-
-  class MachineCopyPropagationPreRegRewrite : public MachineCopyPropagation {
-  public:
-    static char ID; // Pass identification, replacement for typeid
-    MachineCopyPropagationPreRegRewrite()
-        : MachineCopyPropagation(ID, true) {
-      initializeMachineCopyPropagationPreRegRewritePass(*PassRegistry::getPassRegistry());
-    }
-  };
 }
 
 char MachineCopyPropagation::ID = 0;
 char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
@@ -185,29 +82,6 @@ char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
 
 INITIALIZE_PASS(MachineCopyPropagation, DEBUG_TYPE,
                 "Machine Copy Propagation Pass", false, false)
-
-/// We have two separate passes that are very similar, the only difference being
-/// where they are meant to be run in the pipeline. This is done for several
-/// reasons:
-/// - the two passes have different dependencies
-/// - some targets want to disable the later run of this pass, but not the
-///   earlier one (e.g. NVPTX and WebAssembly)
-/// - it allows for easier debugging via llc
-
-char MachineCopyPropagationPreRegRewrite::ID = 0;
-char &llvm::MachineCopyPropagationPreRegRewriteID = MachineCopyPropagationPreRegRewrite::ID;
-
-INITIALIZE_PASS_BEGIN(MachineCopyPropagationPreRegRewrite,
-                      "machine-cp-prerewrite",
-                      "Machine Copy Propagation Pre-Register Rewrite Pass",
-                      false, false)
-INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
-INITIALIZE_PASS_END(MachineCopyPropagationPreRegRewrite,
-                    "machine-cp-prerewrite",
-                    "Machine Copy Propagation Pre-Register Rewrite Pass", false,
-                    false)
 
 /// Remove any entry in \p Map where the register is a subregister or equal to
 /// a register contained in \p Regs.
 static void removeRegsFromMap(Reg2MIMap &Map, const RegList &Regs,
@@ -248,10 +122,6 @@ void MachineCopyPropagation::ClobberRegister(unsigned Reg) {
 }
 
 void MachineCopyPropagation::ReadRegister(unsigned Reg) {
-  // We don't track MaybeDeadCopies when running pre-VirtRegRewriter.
-  if (PreRegRewrite)
-    return;
-
   // If 'Reg' is defined by a copy, the copy is no longer a candidate
   // for elimination.
   for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
@@ -283,46 +153,6 @@ static bool isNopCopy(const MachineInstr &PreviousCopy, unsigned Src,
   return SubIdx == TRI->getSubRegIndex(PreviousDef, Def);
 }
 
-/// Return the physical register assigned to \p Reg if it is a virtual register,
-/// otherwise just return the physical reg from the operand itself.
-///
-/// If \p SubReg is 0 then return the full physical register assigned to the
-/// virtual register ignoring subregs. If we aren't tracking sub-reg liveness
-/// then we need to use this to be more conservative with clobbers by killing
-/// all super reg and their sub reg COPYs as well. This is to prevent COPY
-/// forwarding in cases like the following:
-///
-///    %vreg2 = COPY %vreg1:sub1
-///    %vreg3 = COPY %vreg1:sub0
-///    ...    = OP1 %vreg2
-///    ...    = OP2 %vreg3
-///
-/// After forward %vreg2 (assuming this is the last use of %vreg1) and
-/// VirtRegRewriter adding kill markers we have:
-///
-///    %vreg3 = COPY %vreg1:sub0
-///    ...    = OP1 %vreg1:sub1<kill>
-///    ...    = OP2 %vreg3
-///
-/// If %vreg3 is assigned to a sub-reg of %vreg1, then after rewriting we have:
-///
-///    ... = OP1 R0:sub1, R0<imp-use,kill>
-///    ... = OP2 R0:sub0
-///
-/// and the use of R0 by OP2 will not have a valid definition.
-unsigned MachineCopyPropagation::getPhysReg(unsigned Reg, unsigned SubReg) {
-
-  // Physical registers cannot have subregs.
-  if (!TargetRegisterInfo::isVirtualRegister(Reg))
-    return Reg;
-
-  assert(PreRegRewrite && "Unexpected virtual register encountered");
-  Reg = VRM->getPhys(Reg);
-  if (SubReg && !NoSubRegLiveness)
-    Reg = TRI->getSubReg(Reg, SubReg);
-  return Reg;
-}
-
 /// Remove instruction \p Copy if there exists a previous copy that copies the
 /// register \p Src to the register \p Def; This may happen indirectly by
 /// copying the super registers.
@@ -360,325 +190,6 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy, unsigned Src,
   return true;
 }
 
-/// Decide whether we should forward the destination of \param Copy to its use
-/// in \param UseI based on the register class of the Copy operands. Same-class
-/// COPYs are always accepted by this function, but cross-class COPYs are only
-/// accepted if they are forwarded to another COPY with the operand register
-/// classes reversed. For example:
-///
-///   RegClassA = COPY RegClassB  // Copy parameter
-///   ...
-///   RegClassB = COPY RegClassA  // UseI parameter
-///
-/// which after forwarding becomes
-///
-///   RegClassA = COPY RegClassB
-///   ...
-///   RegClassB = COPY RegClassB
-///
-/// so we have reduced the number of cross-class COPYs and potentially
-/// introduced a nop COPY that can be removed.
-bool MachineCopyPropagation::isForwardableRegClassCopy(
-    const MachineInstr &Copy, const MachineInstr &UseI) {
-  auto isCross = [&](const MachineOperand &Dst, const MachineOperand &Src) {
-    unsigned DstReg = Dst.getReg();
-    unsigned SrcPhysReg = getPhysReg(Src);
-    const TargetRegisterClass *DstRC;
-    if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
-      DstRC = MRI->getRegClass(DstReg);
-      unsigned DstSubReg = Dst.getSubReg();
-      if (DstSubReg)
-        SrcPhysReg = TRI->getMatchingSuperReg(SrcPhysReg, DstSubReg, DstRC);
-    } else
-      DstRC = TRI->getMinimalPhysRegClass(DstReg);
-
-    return !DstRC->contains(SrcPhysReg);
-  };
-
-  const MachineOperand &CopyDst = Copy.getOperand(0);
-  const MachineOperand &CopySrc = Copy.getOperand(1);
-
-  if (!isCross(CopyDst, CopySrc))
-    return true;
-
-  if (!UseI.isCopy())
-    return false;
-
-  assert(getFullPhysReg(UseI.getOperand(1)) == getFullPhysReg(CopyDst));
-  return !isCross(UseI.getOperand(0), CopySrc);
-}
-
-/// Check that the subregs on the copy source operand (\p CopySrc) and the use
-/// operand to be forwarded to (\p MOUse) are compatible with doing the
-/// forwarding. Also computes the new register and subregister to be used in
-/// the forwarded-to instruction.
-std::tuple<unsigned, unsigned, bool> MachineCopyPropagation::checkUseSubReg(
-    const MachineOperand &CopySrc, const MachineOperand &MOUse) {
-  unsigned NewUseReg = CopySrc.getReg();
-  unsigned NewUseSubReg;
-
-  if (TargetRegisterInfo::isPhysicalRegister(NewUseReg)) {
-    // If MOUse is a virtual reg, we need to apply it to the new physical reg
-    // we're going to replace it with.
-    if (MOUse.getSubReg())
-      NewUseReg = TRI->getSubReg(NewUseReg, MOUse.getSubReg());
-    // If the original use subreg isn't valid on the new src reg, we can't
-    // forward it here.
-    if (!NewUseReg)
-      return std::make_tuple(0, 0, false);
-    NewUseSubReg = 0;
-  } else {
-    // %v1 = COPY %v2:sub1
-    //    USE %v1:sub2
-    // The new use is %v2:sub1:sub2
-    NewUseSubReg =
-        TRI->composeSubRegIndices(CopySrc.getSubReg(), MOUse.getSubReg());
-    // Check that NewUseSubReg is valid on NewUseReg
-    if (NewUseSubReg &&
-        !TRI->getSubClassWithSubReg(MRI->getRegClass(NewUseReg), NewUseSubReg))
-      return std::make_tuple(0, 0, false);
-  }
-
-  return std::make_tuple(NewUseReg, NewUseSubReg, true);
-}
-
-/// Check that \p MI does not have implicit uses that overlap with it's \p Use
-/// operand (the register being replaced), since these can sometimes be
-/// implicitly tied to other operands. For example, on AMDGPU:
-///
-/// V_MOVRELS_B32_e32 %VGPR2, %M0<imp-use>, %EXEC<imp-use>, %VGPR2_VGPR3_VGPR4_VGPR5<imp-use>
-///
-/// the %VGPR2 is implicitly tied to the larger reg operand, but we have no
-/// way of knowing we need to update the latter when updating the former.
-bool MachineCopyPropagation::hasImplicitOverlap(const MachineInstr &MI,
-                                                const MachineOperand &Use) {
-  if (!TargetRegisterInfo::isPhysicalRegister(Use.getReg()))
-    return false;
-
-  for (const MachineOperand &MIUse : MI.uses())
-    if (&MIUse != &Use && MIUse.isReg() && MIUse.isImplicit() &&
-        TRI->regsOverlap(Use.getReg(), MIUse.getReg()))
-      return true;
-
-  return false;
-}
-
-/// Narrow the register class of the forwarded vreg so it matches any
-/// instruction constraints. \p MI is the instruction being forwarded to. \p
-/// MOUse is the operand being replaced in \p MI (which hasn't yet been updated
-/// at the time this function is called). \p NewUseReg and \p NewUseSubReg are
-/// what the \p MOUse will be changed to after forwarding.
-///
-/// If we are forwarding
-///   A:RCA = COPY B:RCB
-/// into
-///   ... = OP A:RCA
-///
-/// then we need to narrow the register class of B so that it is a subclass
-/// of RCA so that it meets the instruction register class constraints.
-void MachineCopyPropagation::narrowRegClass(const MachineInstr &MI,
-                                            const MachineOperand &MOUse,
-                                            unsigned NewUseReg,
-                                            unsigned NewUseSubReg) {
-  if (!TargetRegisterInfo::isVirtualRegister(NewUseReg))
-    return;
-
-  // Make sure the virtual reg class allows the subreg.
-  if (NewUseSubReg) {
-    const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
-    const TargetRegisterClass *NewUseRC =
-        TRI->getSubClassWithSubReg(CurUseRC, NewUseSubReg);
-    if (CurUseRC != NewUseRC) {
-      DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
-                   << " to " << TRI->getRegClassName(NewUseRC) << "\n");
-      MRI->setRegClass(NewUseReg, NewUseRC);
-    }
-  }
-
-  unsigned MOUseOpNo = &MOUse - &MI.getOperand(0);
-  const TargetRegisterClass *InstRC =
-      TII->getRegClass(MI.getDesc(), MOUseOpNo, TRI, *MF);
-  if (InstRC) {
-    const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
-    if (NewUseSubReg)
-      InstRC = TRI->getMatchingSuperRegClass(CurUseRC, InstRC, NewUseSubReg);
-    if (!InstRC->hasSubClassEq(CurUseRC)) {
-      const TargetRegisterClass *NewUseRC =
-          TRI->getCommonSubClass(InstRC, CurUseRC);
-      DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
-                   << " to " << TRI->getRegClassName(NewUseRC) << "\n");
-      MRI->setRegClass(NewUseReg, NewUseRC);
-    }
-  }
-}
-
-/// Update the LiveInterval information to reflect the destination of \p Copy
-/// being forwarded to a use in \p UseMI. \p OrigUseReg is the register being
-/// forwarded through. It should be the destination register of \p Copy and has
-/// already been replaced in \p UseMI at the point this function is called. \p
-/// NewUseReg and \p NewUseSubReg are the register and subregister being
-/// forwarded. They should be the source register of the \p Copy and should be
-/// the value of the \p UseMI operand being forwarded at the point this function
-/// is called.
-void MachineCopyPropagation::updateForwardedCopyLiveInterval(
-    const MachineInstr &Copy, const MachineInstr &UseMI, unsigned OrigUseReg,
-    unsigned NewUseReg, unsigned NewUseSubReg) {
-
-  assert(TRI->isSubRegisterEq(getPhysReg(OrigUseReg, 0),
-                              getFullPhysReg(Copy.getOperand(0))) &&
-         "OrigUseReg mismatch");
-  assert(TRI->isSubRegisterEq(getFullPhysReg(Copy.getOperand(1)),
-                              getPhysReg(NewUseReg, 0)) &&
-         "NewUseReg mismatch");
-
-  // Extend live range starting from COPY early-clobber slot, since that
-  // is where the original src live range ends.
-  SlotIndex CopyUseIdx =
-      Indexes->getInstructionIndex(Copy).getRegSlot(true /*=EarlyClobber*/);
-  SlotIndex UseIdx = Indexes->getInstructionIndex(UseMI).getRegSlot();
-  if (TargetRegisterInfo::isVirtualRegister(NewUseReg)) {
-    LiveInterval &LI = LIS->getInterval(NewUseReg);
-    LI.extendInBlock(CopyUseIdx, UseIdx);
-    LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(NewUseSubReg);
-    for (auto &S : LI.subranges())
-      if ((S.LaneMask & UseMask).any() && S.find(CopyUseIdx))
-        S.extendInBlock(CopyUseIdx, UseIdx);
-  } else {
-    assert(NewUseSubReg == 0 && "Unexpected subreg on physical register!");
-    for (MCRegUnitIterator UI(NewUseReg, TRI); UI.isValid(); ++UI) {
-      LiveRange &LR = LIS->getRegUnit(*UI);
-      LR.extendInBlock(CopyUseIdx, UseIdx);
-    }
-  }
-
-  if (!TargetRegisterInfo::isVirtualRegister(OrigUseReg))
-    return;
-
-  LiveInterval &LI = LIS->getInterval(OrigUseReg);
-
-  // Can happen for undef uses.
-  if (LI.empty())
-    return;
-
-  SlotIndex UseIndex = Indexes->getInstructionIndex(UseMI);
-  const LiveRange::Segment *UseSeg = LI.getSegmentContaining(UseIndex);
-
-  // Only shrink if forwarded use is the end of a segment.
-  if (UseSeg->end != UseIndex.getRegSlot())
-    return;
-
-  SmallVector<MachineInstr *, 4> DeadInsts;
-  LIS->shrinkToUses(&LI, &DeadInsts);
-  if (!DeadInsts.empty()) {
-    SmallVector<unsigned, 8> NewRegs;
-    LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, nullptr, this)
-        .eliminateDeadDefs(DeadInsts);
-  }
-}
-
-void MachineCopyPropagation::LRE_WillEraseInstruction(MachineInstr *MI) {
-  // Remove this COPY from further consideration for forwarding.
-  ClobberRegister(getFullPhysReg(MI->getOperand(0)));
-  Changed = true;
-}
-
-/// Look for available copies whose destination register is used by \p MI and
-/// replace the use in \p MI with the copy's source register.
-void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
-  if (AvailCopyMap.empty())
-    return;
-
-  // Look for non-tied explicit vreg uses that have an active COPY
-  // instruction that defines the physical register allocated to them.
-  // Replace the vreg with the source of the active COPY.
-  for (MachineOperand &MOUse : MI.explicit_uses()) {
-    if (!MOUse.isReg() || MOUse.isTied())
-      continue;
-
-    unsigned UseReg = MOUse.getReg();
-    if (!UseReg)
-      continue;
-
-    if (TargetRegisterInfo::isVirtualRegister(UseReg))
-      UseReg = VRM->getPhys(UseReg);
-    else if (MI.isCall() || MI.isReturn() || MI.isInlineAsm() ||
-             MI.hasUnmodeledSideEffects() || MI.isDebugValue() || MI.isKill())
-      // Some instructions seem to have ABI uses e.g. not marked as
-      // implicit, which can lead to forwarding them when we shouldn't, so
-      // restrict the types of instructions we forward physical regs into.
-      continue;
-
-    // Don't forward COPYs via non-allocatable regs since they can have
-    // non-standard semantics.
-    if (!MRI->isAllocatable(UseReg))
-      continue;
-
-    auto CI = AvailCopyMap.find(UseReg);
-    if (CI == AvailCopyMap.end())
-      continue;
-
-    MachineInstr &Copy = *CI->second;
-    MachineOperand &CopyDst = Copy.getOperand(0);
-    MachineOperand &CopySrc = Copy.getOperand(1);
-
-    // Don't forward COPYs that are already NOPs due to register assignment.
-    if (getPhysReg(CopyDst) == getPhysReg(CopySrc))
-      continue;
-
-    // FIXME: Don't handle partial uses of wider COPYs yet.
-    if (CopyDst.getSubReg() != 0 || UseReg != getPhysReg(CopyDst))
-      continue;
-
-    // Don't forward COPYs of non-allocatable regs unless they are constant.
-    unsigned CopySrcReg = CopySrc.getReg();
-    if (TargetRegisterInfo::isPhysicalRegister(CopySrcReg) &&
-        !MRI->isAllocatable(CopySrcReg) && !MRI->isConstantPhysReg(CopySrcReg))
-      continue;
-
-    if (!isForwardableRegClassCopy(Copy, MI))
-      continue;
-
-    unsigned NewUseReg, NewUseSubReg;
-    bool SubRegOK;
-    std::tie(NewUseReg, NewUseSubReg, SubRegOK) =
-        checkUseSubReg(CopySrc, MOUse);
-    if (!SubRegOK)
-      continue;
-
-    if (hasImplicitOverlap(MI, MOUse))
-      continue;
-
-    DEBUG(dbgs() << "MCP: Replacing "
-                 << PrintReg(MOUse.getReg(), TRI, MOUse.getSubReg())
-                 << "\n     with "
-                 << PrintReg(NewUseReg, TRI, CopySrc.getSubReg())
-                 << "\n     in "
-                 << MI
-                 << "     from "
-                 << Copy);
-
-    narrowRegClass(MI, MOUse, NewUseReg, NewUseSubReg);
-
-    unsigned OrigUseReg = MOUse.getReg();
-    MOUse.setReg(NewUseReg);
-    MOUse.setSubReg(NewUseSubReg);
-
-    DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");
-
-    if (PreRegRewrite)
-      updateForwardedCopyLiveInterval(Copy, MI, OrigUseReg, NewUseReg,
-                                      NewUseSubReg);
-    else
-      for (MachineInstr &KMI :
-             make_range(Copy.getIterator(), std::next(MI.getIterator())))
-        KMI.clearRegisterKills(NewUseReg, TRI);
-
-    ++NumCopyForwards;
-    Changed = true;
-  }
-}
-
 void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
   DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
@@ -687,8 +198,12 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
     ++I;
 
     if (MI->isCopy()) {
-      unsigned Def = getPhysReg(MI->getOperand(0));
-      unsigned Src = getPhysReg(MI->getOperand(1));
+      unsigned Def = MI->getOperand(0).getReg();
+      unsigned Src = MI->getOperand(1).getReg();
+
+      assert(!TargetRegisterInfo::isVirtualRegister(Def) &&
+             !TargetRegisterInfo::isVirtualRegister(Src) &&
+             "MachineCopyPropagation should be run after register allocation!");
 
       // The two copies cancel out and the source of the first copy
       // hasn't been overridden, eliminate the second one. e.g.
@@ -705,16 +220,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
       //  %ECX<def> = COPY %EAX
      //  =>
       //  %ECX<def> = COPY %EAX
-      if (!PreRegRewrite)
-        if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
-          continue;
-
-      forwardUses(*MI);
-
-      // Src may have been changed by forwardUses()
-      Src = getPhysReg(MI->getOperand(1));
-      unsigned DefClobber = getFullPhysReg(MI->getOperand(0));
-      unsigned SrcClobber = getFullPhysReg(MI->getOperand(1));
+      if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
+        continue;
 
       // If Src is defined by a previous copy, the previous copy cannot be
       // eliminated.
@@ -731,10 +238,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
       DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
 
       // Copy is now a candidate for deletion.
-      // Only look for dead COPYs if we're not running just before
-      // VirtRegRewriter, since presumably these COPYs will have already been
-      // removed.
-      if (!PreRegRewrite && !MRI->isReserved(Def))
+      if (!MRI->isReserved(Def))
        MaybeDeadCopies.insert(MI);
 
       // If 'Def' is previously source of another copy, then this earlier copy's
@@ -744,11 +248,11 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
      // %xmm2<def> = copy %xmm0
      // ...
      // %xmm2<def> = copy %xmm9
-      ClobberRegister(DefClobber);
+      ClobberRegister(Def);
      for (const MachineOperand &MO : MI->implicit_operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
-        unsigned Reg = getFullPhysReg(MO);
+        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        ClobberRegister(Reg);
@@ -763,27 +267,13 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
      // Remember source that's copied to Def. Once it's clobbered, then
      // it's no longer available for copy propagation.
-      RegList &DestList = SrcMap[SrcClobber];
-      if (!is_contained(DestList, DefClobber))
-        DestList.push_back(DefClobber);
+      RegList &DestList = SrcMap[Src];
+      if (!is_contained(DestList, Def))
+        DestList.push_back(Def);
 
      continue;
    }
 
-    // Clobber any earlyclobber regs first.
-    for (const MachineOperand &MO : MI->operands())
-      if (MO.isReg() && MO.isEarlyClobber()) {
-        unsigned Reg = getFullPhysReg(MO);
-        // If we have a tied earlyclobber, that means it is also read by this
-        // instruction, so we need to make sure we don't remove it as dead
-        // later.
-        if (MO.isTied())
-          ReadRegister(Reg);
-        ClobberRegister(Reg);
-      }
-
-    forwardUses(*MI);
-
    // Not a copy.
    SmallVector<unsigned, 2> Defs;
    const MachineOperand *RegMask = nullptr;
@@ -792,11 +282,14 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
        RegMask = &MO;
      if (!MO.isReg())
        continue;
-      unsigned Reg = getFullPhysReg(MO);
+      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
 
-      if (MO.isDef() && !MO.isEarlyClobber()) {
+      assert(!TargetRegisterInfo::isVirtualRegister(Reg) &&
+             "MachineCopyPropagation should be run after register allocation!");
+
+      if (MO.isDef()) {
        Defs.push_back(Reg);
        continue;
      } else if (MO.readsReg())
@@ -853,8 +346,6 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
  // since we don't want to trust live-in lists.
  if (MBB.succ_empty()) {
    for (MachineInstr *MaybeDead : MaybeDeadCopies) {
-      DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
-            MaybeDead->dump());
      assert(!MRI->isReserved(MaybeDead->getOperand(0).getReg()));
      MaybeDead->eraseFromParent();
      Changed = true;
@@ -877,16 +368,10 @@ bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
  TRI = MF.getSubtarget().getRegisterInfo();
  TII = MF.getSubtarget().getInstrInfo();
  MRI = &MF.getRegInfo();
-  this->MF = &MF;
-  if (PreRegRewrite) {
-    Indexes = &getAnalysis<SlotIndexes>();
-    LIS = &getAnalysis<LiveIntervals>();
-    VRM = &getAnalysis<VirtRegMap>();
-  }
-  NoSubRegLiveness = !MRI->subRegLivenessEnabled();
 
  for (MachineBasicBlock &MBB : MF)
    CopyPropagateBlock(MBB);
 
  return Changed;
 }

View File

@ -88,8 +88,6 @@ static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
cl::desc("Disable Codegen Prepare")); cl::desc("Disable Codegen Prepare"));
static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden, static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
cl::desc("Disable Copy Propagation pass")); cl::desc("Disable Copy Propagation pass"));
static cl::opt<bool> DisableCopyPropPreRegRewrite("disable-copyprop-prerewrite", cl::Hidden,
cl::desc("Disable Copy Propagation Pre-Register Re-write pass"));
static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining", static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
cl::Hidden, cl::desc("Disable Partial Libcall Inlining")); cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
static cl::opt<bool> EnableImplicitNullChecks( static cl::opt<bool> EnableImplicitNullChecks(
@@ -250,9 +248,6 @@ static IdentifyingPassPtr overridePass(AnalysisID StandardID,
  if (StandardID == &MachineCopyPropagationID)
    return applyDisable(TargetID, DisableCopyProp);
 
-  if (StandardID == &MachineCopyPropagationPreRegRewriteID)
-    return applyDisable(TargetID, DisableCopyPropPreRegRewrite);
-
  return TargetID;
 }
@@ -1064,10 +1059,6 @@ void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
    // Allow targets to change the register assignments before rewriting.
    addPreRewrite();
 
-    // Copy propagate to forward register uses and try to eliminate COPYs that
-    // were not coalesced.
-    addPass(&MachineCopyPropagationPreRegRewriteID);
-
    // Finally rewrite virtual registers.
    addPass(&VirtRegRewriterID);

View File

@@ -9,8 +9,7 @@ define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind {
 ; CHECK-LABEL: halfword:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: ldrh [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #1]
-; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
-; CHECK: strh [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #1]
+; CHECK: strh [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #1]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
@@ -25,8 +24,7 @@ define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind {
 ; CHECK-LABEL: word:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: ldr [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #2]
-; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
-; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #2]
+; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #2]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
@@ -41,8 +39,7 @@ define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
 ; CHECK-LABEL: doubleword:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: ldr [[REG1:x[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #3]
-; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
-; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #3]
+; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #3]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255

View File

@@ -8,9 +8,15 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK: add.2d v[[REG:[0-9]+]], v0, v1
 ; CHECK: add d[[REG3:[0-9]+]], d[[REG]], d1
 ; CHECK: sub d[[REG2:[0-9]+]], d[[REG]], d1
-; CHECK-NOT: fmov
+; Without advanced copy optimization, we end up with cross register
+; banks copies that cannot be coalesced.
+; CHECK-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
+; With advanced copy optimization, we end up with just one copy
+; to insert the computed high part into the V register.
+; CHECK-OPT-NOT: fmov
 ; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; CHECK-NOT: fmov
+; CHECK-NOOPT: fmov d0, [[COPY_REG3]]
+; CHECK-OPT-NOT: fmov
 ; CHECK: ins.d v0[1], [[COPY_REG2]]
 ; CHECK-NEXT: ret
 ;
@@ -18,9 +24,11 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; GENERIC: add v[[REG:[0-9]+]].2d, v0.2d, v1.2d
 ; GENERIC: add d[[REG3:[0-9]+]], d[[REG]], d1
 ; GENERIC: sub d[[REG2:[0-9]+]], d[[REG]], d1
-; GENERIC-NOT: fmov
+; GENERIC-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
+; GENERIC-OPT-NOT: fmov
 ; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; GENERIC-NOT: fmov
+; GENERIC-NOOPT: fmov d0, [[COPY_REG3]]
+; GENERIC-OPT-NOT: fmov
 ; GENERIC: ins v0.d[1], [[COPY_REG2]]
 ; GENERIC-NEXT: ret
   %add = add <2 x i64> %a, %b

View File

@@ -4,10 +4,8 @@
 define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp {
 entry:
 ; CHECK-LABEL: t:
-; CHECK: mov [[REG2:x[0-9]+]], x3
-; CHECK: mov [[REG1:x[0-9]+]], x2
-; CHECK: mov x0, x2
-; CHECK: mov x1, x3
+; CHECK: mov x0, [[REG1:x[0-9]+]]
+; CHECK: mov x1, [[REG2:x[0-9]+]]
 ; CHECK: bl _foo
 ; CHECK: mov x0, [[REG1]]
 ; CHECK: mov x1, [[REG2]]

View File

@@ -350,7 +350,7 @@ else:
 ; CHECK-LABEL: test_phi:
 ; CHECK: mov x[[PTR:[0-9]+]], x0
-; CHECK: ldr h[[AB:[0-9]+]], [x0]
+; CHECK: ldr h[[AB:[0-9]+]], [x[[PTR]]]
 ; CHECK: [[LOOP:LBB[0-9_]+]]:
 ; CHECK: mov.16b v[[R:[0-9]+]], v[[AB]]
 ; CHECK: ldr h[[AB]], [x[[PTR]]]

View File

@@ -17,9 +17,6 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
   %val = zext i1 %test to i32
 ; CHECK: cset {{[xw][0-9]+}}, ne
 
-; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS]]
-; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS]]
-
   store i32 %val, i32* @var
 
   call void @bar()
@@ -28,7 +25,7 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
 ; Currently, the comparison is emitted again. An MSR/MRS pair would also be
 ; acceptable, but assuming the call preserves NZCV is not.
   br i1 %test, label %iftrue, label %iffalse
-; CHECK: cmp [[LHSCOPY]], [[RHSCOPY]]
+; CHECK: cmp [[LHS]], [[RHS]]
 ; CHECK: b.eq
 
 iftrue:

View File

@@ -8,9 +8,10 @@
 define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg) {
 ;CHECK-LABEL: test
 entry:
+; A53: mov [[DATA:w[0-9]+]], w1
 ; A53: str q{{[0-9]+}}, {{.*}}
 ; A53: str q{{[0-9]+}}, {{.*}}
-; A53: str w1, {{.*}}
+; A53: str [[DATA]], {{.*}}
 %0 = bitcast %struct1* %fde to i8*
 tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 8, i1 false)

View File

@@ -7,8 +7,8 @@ declare void @foo(i32)
 define void @test(i32 %px) {
 ; CHECK_LABEL: test:
 ; CHECK_LABEL: %entry
-; CHECK: subs [[REG0:w[0-9]+]],
-; CHECK: csel {{w[0-9]+}}, wzr, [[REG0]]
+; CHECK: subs
+; CHECK-NEXT: csel
 entry:
   %sub = add nsw i32 %px, -1
   %cmp = icmp slt i32 %px, 1

View File

@@ -127,21 +127,20 @@ entry:
 }
 
 ; GCN-LABEL: {{^}}call_void_func_byval_struct_kernel:
-; GCN: s_add_u32 s32, s7, 0xa00{{$}}
+; GCN: s_mov_b32 s33, s7
+; GCN: s_add_u32 s32, s33, 0xa00{{$}}
 
 ; GCN-DAG: v_mov_b32_e32 [[NINE:v[0-9]+]], 9
 ; GCN-DAG: v_mov_b32_e32 [[THIRTEEN:v[0-9]+]], 13
-; GCN-DAG: buffer_store_dword [[NINE]], off, s[0:3], s7 offset:8
-; GCN: buffer_store_dword [[THIRTEEN]], off, s[0:3], s7 offset:24
-
-; GCN: s_mov_b32 s33, s7
+; GCN-DAG: buffer_store_dword [[NINE]], off, s[0:3], s33 offset:8
+; GCN: buffer_store_dword [[THIRTEEN]], off, s[0:3], s33 offset:24
 
 ; GCN-DAG: s_add_u32 s32, s32, 0x800{{$}}
-; GCN-DAG: buffer_load_dword [[LOAD0:v[0-9]+]], off, s[0:3], s{{7|33}} offset:8
-; GCN-DAG: buffer_load_dword [[LOAD1:v[0-9]+]], off, s[0:3], s{{7|33}} offset:12
-; GCN-DAG: buffer_load_dword [[LOAD2:v[0-9]+]], off, s[0:3], s{{7|33}} offset:16
-; GCN-DAG: buffer_load_dword [[LOAD3:v[0-9]+]], off, s[0:3], s{{7|33}} offset:20
+; GCN-DAG: buffer_load_dword [[LOAD0:v[0-9]+]], off, s[0:3], s33 offset:8
+; GCN-DAG: buffer_load_dword [[LOAD1:v[0-9]+]], off, s[0:3], s33 offset:12
+; GCN-DAG: buffer_load_dword [[LOAD2:v[0-9]+]], off, s[0:3], s33 offset:16
+; GCN-DAG: buffer_load_dword [[LOAD3:v[0-9]+]], off, s[0:3], s33 offset:20
 
 ; GCN-DAG: buffer_store_dword [[LOAD0]], off, s[0:3], s32 offset:4{{$}}
 ; GCN-DAG: buffer_store_dword [[LOAD1]], off, s[0:3], s32 offset:8

View File

@@ -65,17 +65,17 @@ define amdgpu_kernel void @test_call_external_void_func_i1_imm() #0 {
 ; GCN-LABEL: {{^}}test_call_external_void_func_i1_signext:
 
 ; MESA: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
-
 ; HSA: s_mov_b32 s33, s9{{$}}
-; HSA: s_mov_b32 s32, s9{{$}}
 
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i1_signext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i1_signext@rel32@hi+4
 ; GCN-NEXT: buffer_load_ubyte [[VAR:v[0-9]+]]
 ; HSA-NEXT: s_mov_b32 s4, s33
+; HSA-NEXT: s_mov_b32 s32, s33
 ; MESA-DAG: s_mov_b32 s4, s33{{$}}
+; MESA-DAG: s_mov_b32 s32, s33{{$}}
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_bfe_i32 v0, v0, 0, 1
@@ -90,8 +90,6 @@ define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
 ; FIXME: load should be scheduled before getpc
 ; GCN-LABEL: {{^}}test_call_external_void_func_i1_zeroext:
 ; MESA: s_mov_b32 s33, s3{{$}}
-; MESA: s_mov_b32 s32, s3{{$}}
-; HSA: s_mov_b32 s32, s9{{$}}
 
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i1_zeroext@rel32@lo+4
@@ -99,6 +97,7 @@ define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
 ; GCN-NEXT: buffer_load_ubyte v0
 ; GCN-DAG: s_mov_b32 s4, s33{{$}}
+; GCN-DAG: s_mov_b32 s32, s33{{$}}
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_and_b32_e32 v0, 1, v0
@@ -112,15 +111,14 @@ define amdgpu_kernel void @test_call_external_void_func_i1_zeroext(i32) #0 {
 ; GCN-LABEL: {{^}}test_call_external_void_func_i8_imm:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA: s_mov_b32 s32, s3{{$}}
 
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i8@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i8@rel32@hi+4
 ; GCN-NEXT: v_mov_b32_e32 v0, 0x7b
-; HSA-DAG: s_mov_b32 s4, s9{{$}}
-; HSA-DAG: s_mov_b32 s32, s9{{$}}
+; HSA-DAG: s_mov_b32 s4, s33{{$}}
+; GCN-DAG: s_mov_b32 s32, s33{{$}}
 
 ; GCN: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
 ; GCN-NEXT: s_endpgm
@@ -131,17 +129,16 @@ define amdgpu_kernel void @test_call_external_void_func_i8_imm(i32) #0 {
 ; FIXME: don't wait before call
 ; GCN-LABEL: {{^}}test_call_external_void_func_i8_signext:
+; HSA-DAG: s_mov_b32 s33, s9{{$}}
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
 
 ; GCN-DAG: buffer_load_sbyte v0
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i8_signext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i8_signext@rel32@hi+4
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9{{$}}
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s3
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
@@ -154,16 +151,15 @@ define amdgpu_kernel void @test_call_external_void_func_i8_signext(i32) #0 {
 ; GCN-LABEL: {{^}}test_call_external_void_func_i8_zeroext:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
+; HSA-DAG: s_mov_b32 s33, s9{{$}}
 
 ; GCN-DAG: buffer_load_ubyte v0
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i8_zeroext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i8_zeroext@rel32@hi+4
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
@@ -178,8 +174,7 @@ define amdgpu_kernel void @test_call_external_void_func_i8_zeroext(i32) #0 {
 ; GCN-DAG: v_mov_b32_e32 v0, 0x7b{{$}}
 ; GCN-DAG: s_mov_b32 s4, s33
-; MESA-DAG: s_mov_b32 s32, s1
-; HSA-DAG: s_mov_b32 s32, s7
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @test_call_external_void_func_i16_imm() #0 {
@@ -189,16 +184,14 @@ define amdgpu_kernel void @test_call_external_void_func_i16_imm() #0 {
 ; GCN-LABEL: {{^}}test_call_external_void_func_i16_signext:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
 
 ; GCN-DAG: buffer_load_sshort v0
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i16_signext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i16_signext@rel32@hi+4
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
@@ -211,7 +204,6 @@ define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
 ; GCN-LABEL: {{^}}test_call_external_void_func_i16_zeroext:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
 
 ; GCN-DAG: buffer_load_ushort v0
@@ -219,9 +211,8 @@ define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i16_zeroext@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i16_zeroext@rel32@hi+4
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
@@ -234,15 +225,13 @@ define amdgpu_kernel void @test_call_external_void_func_i16_zeroext(i32) #0 {
 ; GCN-LABEL: {{^}}test_call_external_void_func_i32_imm:
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_mov_b32 s32, s3{{$}}
 
 ; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i32@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i32@rel32@hi+4
 ; GCN: v_mov_b32_e32 v0, 42
-; MESA-DAG: s_mov_b32 s4, s33
-; HSA-DAG: s_mov_b32 s4, s9
-; HSA-DAG: s_mov_b32 s32, s9
+; GCN-DAG: s_mov_b32 s4, s33
+; GCN-DAG: s_mov_b32 s32, s33
 
 ; GCN: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
 ; GCN-NEXT: s_endpgm
@@ -395,10 +384,11 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
 }
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_v32i32_i32:
-; HSA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s9, 0x100{{$}}
+; HSA-DAG: s_mov_b32 s33, s9
+; HSA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s33, 0x100{{$}}
 ; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s3, 0x100{{$}}
+; MESA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s33, 0x100{{$}}
 
 ; GCN-DAG: buffer_load_dword [[VAL1:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
 ; GCN-DAG: buffer_load_dwordx4 v[0:3], off
@@ -447,29 +437,27 @@ define amdgpu_kernel void @test_call_external_void_func_struct_i8_i32() #0 {
 }
 
 ; GCN-LABEL: {{^}}test_call_external_void_func_byval_struct_i8_i32:
-; MESA-DAG: s_add_u32 [[SP:s[0-9]+]], s1, 0x400{{$}}
-; HSA-DAG: s_add_u32 [[SP:s[0-9]+]], s7, 0x400{{$}}
+; GCN-DAG: s_add_u32 [[SP:s[0-9]+]], s33, 0x400{{$}}
 
 ; GCN-DAG: v_mov_b32_e32 [[VAL0:v[0-9]+]], 3
 ; GCN-DAG: v_mov_b32_e32 [[VAL1:v[0-9]+]], 8
-; MESA-DAG: buffer_store_byte [[VAL0]], off, s[36:39], s1 offset:8
-; MESA-DAG: buffer_store_dword [[VAL1]], off, s[36:39], s1 offset:12
+; MESA-DAG: buffer_store_byte [[VAL0]], off, s[36:39], s33 offset:8
+; MESA-DAG: buffer_store_dword [[VAL1]], off, s[36:39], s33 offset:12
 
-; HSA-DAG: s_mov_b32 s33, s7
-; HSA-DAG: buffer_store_byte [[VAL0]], off, s[0:3], s{{7|33}} offset:8
-; HSA-DAG: buffer_store_dword [[VAL1]], off, s[0:3], s{{7|33}} offset:12
+; HSA-DAG: buffer_store_byte [[VAL0]], off, s[0:3], s33 offset:8
+; HSA-DAG: buffer_store_dword [[VAL1]], off, s[0:3], s33 offset:12
 
 ; GCN: s_add_u32 [[SP]], [[SP]], 0x200
 
-; HSA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[0:3], s{{7|33}} offset:8
-; HSA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[0:3], s{{7|33}} offset:12
+; HSA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[0:3], s33 offset:8
+; HSA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[0:3], s33 offset:12
 
 ; HSA: buffer_store_dword [[RELOAD_VAL1]], off, s[0:3], [[SP]] offset:8
 ; HSA: buffer_store_dword [[RELOAD_VAL0]], off, s[0:3], [[SP]] offset:4
 
-; MESA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[36:39], s1 offset:8
-; MESA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[36:39], s1 offset:12
+; MESA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[36:39], s33 offset:8
+; MESA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[36:39], s33 offset:12
 
 ; MESA: buffer_store_dword [[RELOAD_VAL1]], off, s[36:39], [[SP]] offset:8
 ; MESA: buffer_store_dword [[RELOAD_VAL0]], off, s[36:39], [[SP]] offset:4
@@ -502,8 +490,8 @@ define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0
 ; GCN: buffer_store_dword [[RELOAD_VAL1]], off, s{{\[[0-9]+:[0-9]+\]}}, [[SP]] offset:8
 ; GCN: buffer_store_dword [[RELOAD_VAL0]], off, s{{\[[0-9]+:[0-9]+\]}}, [[SP]] offset:4
 ; GCN-NEXT: s_swappc_b64
-; GCN-DAG: buffer_load_ubyte [[LOAD_OUT_VAL0:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s33 offset:16
-; GCN-DAG: buffer_load_dword [[LOAD_OUT_VAL1:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s33 offset:20
+; GCN-DAG: buffer_load_ubyte [[LOAD_OUT_VAL0:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, [[FP_REG]] offset:16
+; GCN-DAG: buffer_load_dword [[LOAD_OUT_VAL1:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, [[FP_REG]] offset:20
 
 ; GCN: s_sub_u32 [[SP]], [[SP]], 0x200
 ; GCN: buffer_store_byte [[LOAD_OUT_VAL0]], off

View File

@@ -5,12 +5,12 @@
 declare void @external_void_func_void() #0
 
 ; GCN-LABEL: {{^}}test_kernel_call_external_void_func_void_clobber_s30_s31_call_external_void_func_void:
+; GCN: s_mov_b32 s33, s7
 ; GCN: s_getpc_b64 s[34:35]
 ; GCN-NEXT: s_add_u32 s34, s34,
 ; GCN-NEXT: s_addc_u32 s35, s35,
-; GCN-NEXT: s_mov_b32 s4, s7
-; GCN-NEXT: s_mov_b32 s33, s7
-; GCN-NEXT: s_mov_b32 s32, s7
+; GCN-NEXT: s_mov_b32 s4, s33
+; GCN-NEXT: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64 s[30:31], s[34:35]
 ; GCN-NEXT: s_mov_b32 s4, s33
@@ -112,13 +112,14 @@ define amdgpu_kernel void @test_call_void_func_void_mayclobber_v31(i32 addrspace
 }
 
 ; GCN-LABEL: {{^}}test_call_void_func_void_preserves_s33:
+; GCN: s_mov_b32 s34, s9
 ; GCN: ; def s33
 ; GCN-NEXT: #ASMEND
 ; GCN: s_getpc_b64 s[6:7]
 ; GCN-NEXT: s_add_u32 s6, s6, external_void_func_void@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s7, s7, external_void_func_void@rel32@hi+4
-; GCN-NEXT: s_mov_b32 s4, s9
-; GCN-NEXT: s_mov_b32 s32, s9
+; GCN-NEXT: s_mov_b32 s4, s34
+; GCN-NEXT: s_mov_b32 s32, s34
 ; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: ; use s33
@@ -132,13 +133,14 @@ define amdgpu_kernel void @test_call_void_func_void_preserves_s33(i32 addrspace(
 }
 
 ; GCN-LABEL: {{^}}test_call_void_func_void_preserves_v32:
+; GCN: s_mov_b32 s33, s9
 ; GCN: ; def v32
 ; GCN-NEXT: #ASMEND
 ; GCN: s_getpc_b64 s[6:7]
 ; GCN-NEXT: s_add_u32 s6, s6, external_void_func_void@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s7, s7, external_void_func_void@rel32@hi+4
-; GCN-NEXT: s_mov_b32 s4, s9
-; GCN-NEXT: s_mov_b32 s32, s9
+; GCN-NEXT: s_mov_b32 s4, s33
+; GCN-NEXT: s_mov_b32 s32, s33
 ; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: ; use v32
@@ -165,11 +167,11 @@ define void @void_func_void_clobber_s33() #2 {
 ; GCN-LABEL: {{^}}test_call_void_func_void_clobber_s33:
 ; GCN: s_mov_b32 s33, s7
-; GCN: s_mov_b32 s32, s7
 ; GCN: s_getpc_b64
 ; GCN-NEXT: s_add_u32
 ; GCN-NEXT: s_addc_u32
 ; GCN-NEXT: s_mov_b32 s4, s33
+; GCN-NEXT: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @test_call_void_func_void_clobber_s33() #0 {
@@ -191,9 +191,11 @@ define void @use_workgroup_id_yz() #1 {
 ; GCN: enable_sgpr_workgroup_id_z = 0
 ; GCN-NOT: s6
-; GCN: s_mov_b32 s4, s7
+; GCN: s_mov_b32 s33, s7
 ; GCN-NOT: s6
-; GCN: s_mov_b32 s32, s7
+; GCN: s_mov_b32 s4, s33
+; GCN-NOT: s6
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_use_workgroup_id_x() #1 {
   call void @use_workgroup_id_x()
@@ -206,9 +208,9 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_x() #1 {
 ; GCN: enable_sgpr_workgroup_id_z = 0
 ; GCN: s_mov_b32 s33, s8
-; GCN: s_mov_b32 s32, s8
 ; GCN: s_mov_b32 s4, s33
 ; GCN: s_mov_b32 s6, s7
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_use_workgroup_id_y() #1 {
   call void @use_workgroup_id_y()
@@ -237,10 +239,10 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_z() #1 {
 ; GCN: s_mov_b32 s33, s8
 ; GCN-NOT: s6
 ; GCN-NOT: s7
-; GCN: s_mov_b32 s32, s8
+; GCN: s_mov_b32 s4, s33
 ; GCN-NOT: s6
 ; GCN-NOT: s7
-; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s32, s33
 ; GCN-NOT: s6
 ; GCN-NOT: s7
 ; GCN: s_swappc_b64
@@ -254,17 +256,19 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_xy() #1 {
 ; GCN: enable_sgpr_workgroup_id_y = 1
 ; GCN: enable_sgpr_workgroup_id_z = 1
-; GCN-NOT: s6
-; GCN-NOT: s7
-; GCN-NOT: s8
-; GCN: s_mov_b32 s4, s9
+; GCN: s_mov_b32 s33, s9
 ; GCN-NOT: s6
 ; GCN-NOT: s7
 ; GCN-NOT: s8
-; GCN: s_mov_b32 s32, s9
+; GCN: s_mov_b32 s4, s33
+; GCN-NOT: s6
+; GCN-NOT: s7
+; GCN-NOT: s8
+; GCN: s_mov_b32 s32, s33
 ; GCN-NOT: s6
 ; GCN-NOT: s7
@@ -285,11 +289,11 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_xyz() #1 {
 ; GCN-NOT: s6
 ; GCN-NOT: s7
-; GCN: s_mov_b32 s32, s8
+; GCN: s_mov_b32 s4, s33
 ; GCN-NOT: s6
 ; GCN-NOT: s7
-; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s32, s33
 ; GCN-NOT: s6
 ; GCN-NOT: s7
@@ -304,10 +308,11 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_xz() #1 {
 ; GCN: enable_sgpr_workgroup_id_y = 1
 ; GCN: enable_sgpr_workgroup_id_z = 1
+; GCN: s_mov_b32 s33, s9
 ; GCN: s_mov_b32 s6, s7
-; GCN: s_mov_b32 s4, s9
+; GCN: s_mov_b32 s4, s33
 ; GCN: s_mov_b32 s7, s8
-; GCN: s_mov_b32 s32, s9
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_use_workgroup_id_yz() #1 {
   call void @use_workgroup_id_yz()
@@ -371,12 +376,13 @@ define void @other_arg_use_workgroup_id_z(i32 %arg0) #1 {
 ; GCN: enable_sgpr_workgroup_id_y = 0
 ; GCN: enable_sgpr_workgroup_id_z = 0
+; GCN-DAG: s_mov_b32 s33, s7
 ; GCN-DAG: v_mov_b32_e32 v0, 0x22b
 ; GCN-NOT: s6
-; GCN: s_mov_b32 s4, s7
+; GCN: s_mov_b32 s4, s33
 ; GCN-NOT: s6
-; GCN-DAG: s_mov_b32 s32, s7
+; GCN-DAG: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_x() #1 {
   call void @other_arg_use_workgroup_id_x(i32 555)
@@ -389,10 +395,10 @@ define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_x() #1 {
 ; GCN: enable_sgpr_workgroup_id_z = 0
 ; GCN-DAG: s_mov_b32 s33, s8
-; GCN-DAG: s_mov_b32 s32, s8
 ; GCN-DAG: v_mov_b32_e32 v0, 0x22b
 ; GCN: s_mov_b32 s4, s33
 ; GCN-DAG: s_mov_b32 s6, s7
+; GCN-DAG: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_y() #1 {
   call void @other_arg_use_workgroup_id_y(i32 555)
@@ -405,11 +411,11 @@ define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_y() #1 {
 ; GCN: enable_sgpr_workgroup_id_z = 1
 ; GCN: s_mov_b32 s33, s8
-; GCN: s_mov_b32 s32, s8
 ; GCN-DAG: v_mov_b32_e32 v0, 0x22b
 ; GCN: s_mov_b32 s4, s33
 ; GCN-DAG: s_mov_b32 s6, s7
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_z() #1 {
   call void @other_arg_use_workgroup_id_z(i32 555)
@@ -469,12 +475,13 @@ define void @use_every_sgpr_input() #1 {
 ; GCN: enable_sgpr_dispatch_id = 1
 ; GCN: enable_sgpr_flat_scratch_init = 1
+; GCN: s_mov_b32 s33, s17
 ; GCN: s_mov_b64 s[12:13], s[10:11]
 ; GCN: s_mov_b64 s[10:11], s[8:9]
 ; GCN: s_mov_b64 s[8:9], s[6:7]
 ; GCN: s_mov_b64 s[6:7], s[4:5]
-; GCN: s_mov_b32 s4, s17
-; GCN: s_mov_b32 s32, s17
+; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s32, s33
 ; GCN: s_swappc_b64
 define amdgpu_kernel void @kern_indirect_use_every_sgpr_input() #1 {
   call void @use_every_sgpr_input()
@@ -540,18 +547,16 @@ define void @func_use_every_sgpr_input_call_use_workgroup_id_xyz() #1 {
 ; GCN: s_mov_b32 s5, s32
 ; GCN: s_add_u32 s32, s32, 0x300
-; GCN: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
-; GCN: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]
-; GCN: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
-; GCN: s_mov_b32 s6, s14
-; GCN: s_mov_b32 s7, s15
-; GCN: s_mov_b32 s8, s16
-; GCN: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
-; GCN: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
-; GCN: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
+; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
+; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
+; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
+; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
+; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
+; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]
+; GCN-DAG: s_mov_b32 s6, [[SAVE_X]]
+; GCN-DAG: s_mov_b32 s7, [[SAVE_Y]]
+; GCN-DAG: s_mov_b32 s8, [[SAVE_Z]]
 ; GCN: s_swappc_b64
 ; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4
@@ -288,8 +288,8 @@ define void @too_many_args_use_workitem_id_x(
 ; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_x:
 ; GCN: enable_vgpr_workitem_id = 0
-; GCN: s_mov_b32 s32, s7
 ; GCN: s_mov_b32 s33, s7
+; GCN: s_mov_b32 s32, s33
 ; GCN: buffer_store_dword v0, off, s[0:3], s32 offset:8
 ; GCN: s_mov_b32 s4, s33
 ; GCN: s_swappc_b64
@@ -422,16 +422,15 @@ define void @too_many_args_use_workitem_id_x_byval(
 ; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_x_byval:
 ; GCN: enable_vgpr_workitem_id = 0
-; GCN: s_add_u32 s32, s7, 0x200{{$}}
-; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
-; GCN: s_add_u32 s32, s32, 0x100{{$}}
-; GCN: buffer_store_dword [[K]], off, s[0:3], s7 offset:4
-; GCN: buffer_store_dword v0, off, s[0:3], s32 offset:12
-; GCN: buffer_load_dword [[RELOAD_BYVAL:v[0-9]+]], off, s[0:3], s7 offset:4
 ; GCN: s_mov_b32 s33, s7
+; GCN: s_add_u32 s32, s33, 0x200{{$}}
+; GCN-DAG: s_add_u32 s32, s32, 0x100{{$}}
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
+; GCN: buffer_store_dword [[K]], off, s[0:3], s33 offset:4
+; GCN: buffer_store_dword v0, off, s[0:3], s32 offset:12
+; GCN: buffer_load_dword [[RELOAD_BYVAL:v[0-9]+]], off, s[0:3], s33 offset:4
 ; GCN: buffer_store_dword [[RELOAD_BYVAL]], off, s[0:3], s32 offset:4{{$}}
 ; GCN: v_mov_b32_e32 [[RELOAD_BYVAL]],
 ; GCN: s_swappc_b64
@@ -549,8 +548,8 @@ define void @too_many_args_use_workitem_id_xyz(
 ; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_xyz:
 ; GCN: enable_vgpr_workitem_id = 2
-; GCN: s_mov_b32 s32, s7
 ; GCN: s_mov_b32 s33, s7
+; GCN: s_mov_b32 s32, s33
 ; GCN-DAG: buffer_store_dword v0, off, s[0:3], s32 offset:8
 ; GCN-DAG: buffer_store_dword v1, off, s[0:3], s32 offset:12
@@ -644,8 +643,8 @@ define void @too_many_args_use_workitem_id_x_stack_yz(
 ; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_x_stack_yz:
 ; GCN: enable_vgpr_workitem_id = 2
-; GCN: s_mov_b32 s32, s7
 ; GCN: s_mov_b32 s33, s7
+; GCN: s_mov_b32 s32, s33
 ; GCN-DAG: v_mov_b32_e32 v31, v0
 ; GCN-DAG: buffer_store_dword v1, off, s[0:3], s32 offset:8
@@ -5,49 +5,49 @@
 ; Test addressing modes when the scratch base is not a frame index.

 ; GCN-LABEL: {{^}}store_private_offset_i8:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_i8() #0 {
   store volatile i8 5, i8* inttoptr (i32 8 to i8*)
   ret void
 }

 ; GCN-LABEL: {{^}}store_private_offset_i16:
-; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_i16() #0 {
   store volatile i16 5, i16* inttoptr (i32 8 to i16*)
   ret void
 }

 ; GCN-LABEL: {{^}}store_private_offset_i32:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_i32() #0 {
   store volatile i32 5, i32* inttoptr (i32 8 to i32*)
   ret void
 }

 ; GCN-LABEL: {{^}}store_private_offset_v2i32:
-; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_v2i32() #0 {
   store volatile <2 x i32> <i32 5, i32 10>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
   ret void
 }

 ; GCN-LABEL: {{^}}store_private_offset_v4i32:
-; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
+; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_v4i32() #0 {
   store volatile <4 x i32> <i32 5, i32 10, i32 15, i32 0>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
   ret void
 }

 ; GCN-LABEL: {{^}}load_private_offset_i8:
-; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_i8() #0 {
   %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
   ret void
 }

 ; GCN-LABEL: {{^}}sextload_private_offset_i8:
-; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s3 offset:8
+; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
   %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
   %sextload = sext i8 %load to i32
@@ -56,7 +56,7 @@ define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0
 }

 ; GCN-LABEL: {{^}}zextload_private_offset_i8:
-; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s3 offset:8
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
   %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
   %zextload = zext i8 %load to i32
@@ -65,14 +65,14 @@ define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0
 }

 ; GCN-LABEL: {{^}}load_private_offset_i16:
-; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_i16() #0 {
   %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
   ret void
 }

 ; GCN-LABEL: {{^}}sextload_private_offset_i16:
-; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s3 offset:8
+; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
   %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
   %sextload = sext i16 %load to i32
@@ -90,28 +90,28 @@ define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #
 }

 ; GCN-LABEL: {{^}}load_private_offset_i32:
-; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_i32() #0 {
   %load = load volatile i32, i32* inttoptr (i32 8 to i32*)
   ret void
 }

 ; GCN-LABEL: {{^}}load_private_offset_v2i32:
-; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_v2i32() #0 {
   %load = load volatile <2 x i32>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
   ret void
 }

 ; GCN-LABEL: {{^}}load_private_offset_v4i32:
-; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
+; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_v4i32() #0 {
   %load = load volatile <4 x i32>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
   ret void
 }

 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s1 offset:4095
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:4095
 define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
   store volatile i8 5, i8* inttoptr (i32 4095 to i8*)
   ret void
@@ -119,7 +119,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus1:
 ; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s1 offen{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen{{$}}
 define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
   store volatile i8 5, i8* inttoptr (i32 4096 to i8*)
   ret void
@@ -127,7 +127,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus2:
 ; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s1 offen offset:1{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen offset:1{{$}}
 define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
   store volatile i8 5, i8* inttoptr (i32 4097 to i8*)
   ret void
@@ -78,7 +78,7 @@ ENDIF: ; preds = %LOOP
 ; Uses a copy intsead of an or
 ; GCN: s_mov_b64 [[COPY:s\[[0-9]+:[0-9]+\]]], [[BREAK_REG]]
-; GCN: s_or_b64 [[BREAK_REG]], exec, [[BREAK_REG]]
+; GCN: s_or_b64 [[BREAK_REG]], exec, [[COPY]]
 define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
 bb:
   %id = call i32 @llvm.amdgcn.workitem.id.x()
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=OPT %s
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=CI -check-prefix=OPT %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=iceland -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=OPTICELAND %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=iceland -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=OPT %s
 ; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=OPTNONE %s

 ; There are no stack objects, but still a private memory access. The
@@ -8,12 +8,10 @@
 ; shifted down to the end of the used registers.

 ; GCN-LABEL: {{^}}store_to_undef:
-; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s5 offen{{$}}
-; The -mcpu=iceland case doesn't copy-propagate the same as the other two opt cases because the temp registers %SGPR88_SGPR89_SGPR90_SGPR91 and %SGPR93 are marked as non-allocatable by this subtarget.
-; OPTICELAND-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
-; OPTICELAND-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPTICELAND-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
-; OPTICELAND: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
+; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}

 ; -O0 should assume spilling, so the input scratch resource descriptor
 ; should be used directly without any copies.
@@ -26,21 +24,30 @@ define amdgpu_kernel void @store_to_undef() #0 {
 }

 ; GCN-LABEL: {{^}}store_to_inttoptr:
-; OPT: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:124{{$}}
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
+; OPT: buffer_store_dword v{{[0-9]+}}, off, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offset:124{{$}}
 define amdgpu_kernel void @store_to_inttoptr() #0 {
   store volatile i32 0, i32* inttoptr (i32 124 to i32*)
   ret void
 }

 ; GCN-LABEL: {{^}}load_from_undef:
-; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s5 offen{{$}}
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
+; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
 define amdgpu_kernel void @load_from_undef() #0 {
   %ld = load volatile i32, i32* undef
   ret void
 }

 ; GCN-LABEL: {{^}}load_from_inttoptr:
-; OPT: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s5 offset:124{{$}}
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
+; OPT: buffer_load_dword v{{[0-9]+}}, off, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offset:124{{$}}
 define amdgpu_kernel void @load_from_inttoptr() #0 {
   %ld = load volatile i32, i32* inttoptr (i32 124 to i32*)
   ret void
@@ -2,10 +2,10 @@
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

 ; GCN-LABEL: {{^}}vgpr:
-; GCN-DAG: v_mov_b32_e32 v1, v0
-; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
+; GCN: v_mov_b32_e32 v1, v0
+; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
+; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
 ; GCN: s_waitcnt expcnt(0)
-; GCN: v_add_f32_e32 v0, 1.0, v0
 ; GCN-NOT: s_endpgm
 define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
 bb:
@@ -204,13 +204,13 @@ bb:
 }

 ; GCN-LABEL: {{^}}both:
-; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
-; GCN-DAG: v_mov_b32_e32 v1, v0
-; GCN-DAG: s_mov_b32 s1, s2
-; GCN: s_waitcnt expcnt(0)
-; GCN: v_add_f32_e32 v0, 1.0, v0
+; GCN: v_mov_b32_e32 v1, v0
+; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
+; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
 ; GCN-DAG: s_add_i32 s0, s3, 2
-; GCN-DAG: s_mov_b32 s2, s3
+; GCN-DAG: s_mov_b32 s1, s2
+; GCN: s_mov_b32 s2, s3
+; GCN: s_waitcnt expcnt(0)
 ; GCN-NOT: s_endpgm
 define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
 bb:
@@ -287,8 +287,7 @@ define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
   %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
   %oldval = extractvalue { i32, i1 } %pair, 0
-; CHECK-ARMV7: mov r[[ADDR:[0-9]+]], r0
-; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r0]
+; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
 ; CHECK-ARMV7: cmp [[OLDVAL]], r1
 ; CHECK-ARMV7: bne [[FAIL_BB:\.?LBB[0-9]+_[0-9]+]]
 ; CHECK-ARMV7: dmb ish
@@ -306,8 +305,7 @@ define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
 ; CHECK-ARMV7: dmb ish
 ; CHECK-ARMV7: bx lr
-; CHECK-T2: mov r[[ADDR:[0-9]+]], r0
-; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r0]
+; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
 ; CHECK-T2: cmp [[OLDVAL]], r1
 ; CHECK-T2: bne [[FAIL_BB:\.?LBB.*]]
 ; CHECK-T2: dmb ish
@@ -181,7 +181,7 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float
 ; CHECK-APPLE: beq
 ; CHECK-APPLE: mov r0, #16
 ; CHECK-APPLE: malloc
-; CHECK-APPLE: strb r{{.*}}, [r0, #8]
+; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8]
 ; CHECK-APPLE: ble
 ; CHECK-APPLE: mov r8, [[ID]]
@@ -165,7 +165,7 @@ entry:
 ; MMR3: subu16 $5, $[[T19]], $[[T20]]
 ; MMR6: move $[[T0:[0-9]+]], $7
-; MMR6: sw $7, 8($sp)
+; MMR6: sw $[[T0]], 8($sp)
 ; MMR6: move $[[T1:[0-9]+]], $5
 ; MMR6: sw $4, 12($sp)
 ; MMR6: lw $[[T2:[0-9]+]], 48($sp)
@@ -14,8 +14,7 @@ define double @foo3(double %a) nounwind {
   ret double %r
 ; CHECK: @foo3
-; CHECK: fmr [[REG:[0-9]+]], [[REG2:[0-9]+]]
-; CHECK: xsnmsubadp [[REG]], {{[0-9]+}}, [[REG2]]
+; CHECK: xsnmsubadp [[REG:[0-9]+]], {{[0-9]+}}, [[REG]]
 ; CHECK: xsmaddmdp
 ; CHECK: xsmaddadp
 }
@@ -75,7 +75,7 @@ entry:
 ; CHECK-DAG: mr [[REG:[0-9]+]], 3
 ; CHECK-DAG: li 0, 1076
-; CHECK-DAG: stw 3,
+; CHECK: stw [[REG]],
 ; CHECK: #APP
 ; CHECK: sc
@@ -23,7 +23,7 @@ target triple = "powerpc64le-grtev4-linux-gnu"
 ;CHECK-LABEL: straight_test:
 ; test1 may have been merged with entry
 ;CHECK: mr [[TAGREG:[0-9]+]], 3
-;CHECK: andi. {{[0-9]+}}, [[TAGREG:[0-9]+]], 1
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
 ;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
 ;CHECK-NEXT: # %test2
 ;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
@@ -156,9 +156,9 @@ define double @floatarg(double %a0, ; %i0,%i1
 ; HARD-NEXT: std %o0, [%sp+96]
 ; HARD-NEXT: st %o1, [%sp+92]
 ; HARD-NEXT: mov %i0, %o2
-; HARD-NEXT: mov %i1, %o3
+; HARD-NEXT: mov %o0, %o3
 ; HARD-NEXT: mov %o1, %o4
-; HARD-NEXT: mov %i1, %o5
+; HARD-NEXT: mov %o0, %o5
 ; HARD-NEXT: call floatarg
 ; HARD: std %f0, [%i4]
 ; SOFT: st %i0, [%sp+104]
@@ -235,9 +235,8 @@ entry:
 ; CHECK-LABEL: test_load_add_i32
 ; CHECK: membar
-; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
-; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
-; CHECK: cas [%o0], [[V]], [[V2]]
+; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
+; CHECK: cas [%o0], [[V]], [[U]]
 ; CHECK: membar
 define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
 entry:
@@ -598,7 +598,7 @@ declare void @abort() #0
 define i32 @b_to_bx(i32 %value) {
 ; CHECK-LABEL: b_to_bx:
 ; DISABLE: push {r7, lr}
-; CHECK: cmp r0, #49
+; CHECK: cmp r1, #49
 ; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]]
 ; ENABLE: push {r7, lr}
@@ -7,7 +7,7 @@ define i32 @f(i32 %a, i32 %b) {
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: imull %ecx, %edx
+; CHECK-NEXT: imull %edx, %edx
 ; CHECK-NEXT: imull %eax, %ecx
 ; CHECK-NEXT: imull %eax, %eax
 ; CHECK-NEXT: addl %edx, %eax
@@ -106,7 +106,7 @@ entry:
 ; CHECK-DAG: movl %edx, %[[r1:[^ ]*]]
 ; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]]
 ; CHECK-DAG: movl %[[r2]], 4(%esp)
-; CHECK-DAG: movl %edx, (%esp)
+; CHECK-DAG: movl %[[r1]], (%esp)
 ; CHECK: movl %esp, %[[reg:[^ ]*]]
 ; CHECK: pushl %[[reg]]
 ; CHECK: calll _addrof_i64
@@ -407,6 +407,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
 ; SSE2-NEXT: pand %xmm0, %xmm2
 ; SSE2-NEXT: packuswb %xmm1, %xmm2
 ; SSE2-NEXT: packuswb %xmm10, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm1
 ; SSE2-NEXT: psrld $1, %xmm4
 ; SSE2-NEXT: psrld $1, %xmm12
 ; SSE2-NEXT: pand %xmm0, %xmm12
@@ -443,7 +444,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
 ; SSE2-NEXT: movdqu %xmm7, (%rax)
 ; SSE2-NEXT: movdqu %xmm11, (%rax)
 ; SSE2-NEXT: movdqu %xmm13, (%rax)
-; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: movdqu %xmm1, (%rax)
 ; SSE2-NEXT: retq
 ;
 ; AVX1-LABEL: avg_v64i8:
@@ -12,11 +12,11 @@ define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>*
 ; CHECK-NEXT: movq %rdx, %r14
 ; CHECK-NEXT: movq %rsi, %r15
 ; CHECK-NEXT: movq %rdi, %rbx
-; CHECK-NEXT: vmovaps (%rdi), %ymm0
+; CHECK-NEXT: vmovaps (%rbx), %ymm0
 ; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
-; CHECK-NEXT: vmovaps (%rsi), %ymm1
+; CHECK-NEXT: vmovaps (%r15), %ymm1
 ; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
-; CHECK-NEXT: vmovaps (%rdx), %ymm2
+; CHECK-NEXT: vmovaps (%r14), %ymm2
 ; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
 ; CHECK-NEXT: callq dummy
 ; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
@@ -9,10 +9,10 @@ define void @bar__512(<16 x i32>* %var) #0 {
 ; CHECK-NEXT: pushq %rbx
 ; CHECK-NEXT: subq $112, %rsp
 ; CHECK-NEXT: movq %rdi, %rbx
-; CHECK-NEXT: vmovups (%rdi), %zmm0
+; CHECK-NEXT: vmovups (%rbx), %zmm0
 ; CHECK-NEXT: vmovups %zmm0, (%rsp) ## 64-byte Spill
 ; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %zmm1
-; CHECK-NEXT: vmovaps %zmm1, (%rdi)
+; CHECK-NEXT: vmovaps %zmm1, (%rbx)
 ; CHECK-NEXT: callq _Print__512
 ; CHECK-NEXT: vmovups (%rsp), %zmm0 ## 64-byte Reload
 ; CHECK-NEXT: callq _Print__512
@@ -466,7 +466,7 @@ define i32 @test12(i32 %a1, i32 %a2, i32 %b1) {
 ; KNL_X32-NEXT: movl %edi, (%esp)
 ; KNL_X32-NEXT: calll _test11
 ; KNL_X32-NEXT: movl %eax, %ebx
-; KNL_X32-NEXT: movzbl %al, %eax
+; KNL_X32-NEXT: movzbl %bl, %eax
 ; KNL_X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
 ; KNL_X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
 ; KNL_X32-NEXT: movl %edi, (%esp)
@@ -1171,6 +1171,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; KNL-NEXT: kmovw %esi, %k0
 ; KNL-NEXT: kshiftlw $7, %k0, %k2
 ; KNL-NEXT: kshiftrw $15, %k2, %k2
+; KNL-NEXT: kmovw %k2, %eax
 ; KNL-NEXT: kshiftlw $6, %k0, %k0
 ; KNL-NEXT: kshiftrw $15, %k0, %k0
 ; KNL-NEXT: kmovw %k0, %ecx
@@ -1183,7 +1184,8 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; KNL-NEXT: kshiftlw $1, %k0, %k0
 ; KNL-NEXT: kshiftrw $1, %k0, %k0
-; KNL-NEXT: kshiftlw $7, %k2, %k1
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kshiftlw $7, %k1, %k1
 ; KNL-NEXT: korw %k1, %k0, %k1
 ; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; KNL-NEXT: vpmovqw %zmm0, %xmm0
@@ -1195,16 +1197,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; SKX-NEXT: kmovd %esi, %k1
 ; SKX-NEXT: kshiftlw $7, %k1, %k2
 ; SKX-NEXT: kshiftrw $15, %k2, %k2
+; SKX-NEXT: kmovd %k2, %eax
 ; SKX-NEXT: kshiftlw $6, %k1, %k1
 ; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: kmovd %k1, %ecx
 ; SKX-NEXT: vpmovm2q %k0, %zmm0
-; SKX-NEXT: vpmovm2q %k1, %zmm1
+; SKX-NEXT: kmovd %ecx, %k0
+; SKX-NEXT: vpmovm2q %k0, %zmm1
 ; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
 ; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
 ; SKX-NEXT: vpmovq2m %zmm2, %k0
 ; SKX-NEXT: kshiftlb $1, %k0, %k0
 ; SKX-NEXT: kshiftrb $1, %k0, %k0
-; SKX-NEXT: kshiftlb $7, %k2, %k1
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: kshiftlb $7, %k1, %k1
 ; SKX-NEXT: korb %k1, %k0, %k0
 ; SKX-NEXT: vpmovm2w %k0, %xmm0
 ; SKX-NEXT: vzeroupper
@@ -1216,6 +1222,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; AVX512BW-NEXT: kmovd %esi, %k0
 ; AVX512BW-NEXT: kshiftlw $7, %k0, %k2
 ; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
+; AVX512BW-NEXT: kmovd %k2, %eax
 ; AVX512BW-NEXT: kshiftlw $6, %k0, %k0
 ; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
 ; AVX512BW-NEXT: kmovd %k0, %ecx
@@ -1228,7 +1235,8 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; AVX512BW-NEXT: kshiftlw $1, %k0, %k0
 ; AVX512BW-NEXT: kshiftrw $1, %k0, %k0
-; AVX512BW-NEXT: kshiftlw $7, %k2, %k1
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: kshiftlw $7, %k1, %k1
 ; AVX512BW-NEXT: korw %k1, %k0, %k0
 ; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
@@ -1241,16 +1249,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
 ; AVX512DQ-NEXT: kmovw %esi, %k1
 ; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
 ; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
+; AVX512DQ-NEXT: kmovw %k2, %eax
 ; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
 ; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
 ; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
+; AVX512DQ-NEXT: kmovw %ecx, %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
 ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
 ; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
 ; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
 ; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
 ; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
-; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
+; AVX512DQ-NEXT: kmovw %eax, %k1
+; AVX512DQ-NEXT: kshiftlb $7, %k1, %k1
 ; AVX512DQ-NEXT: korb %k1, %k0, %k0
 ; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
 ; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
@@ -2005,7 +2005,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
 ; AVX512F-32-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm2
 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %ecx, %eax
+; AVX512F-32-NEXT: movl %esi, %eax
 ; AVX512F-32-NEXT: shrl $30, %eax
 ; AVX512F-32-NEXT: kmovd %eax, %k1
 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
@@ -2016,7 +2016,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
 ; AVX512F-32-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm1
 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %ecx, %eax
+; AVX512F-32-NEXT: movl %esi, %eax
 ; AVX512F-32-NEXT: shrl $31, %eax
 ; AVX512F-32-NEXT: kmovd %eax, %k1
 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
@@ -2891,7 +2891,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
 ; AVX512F-32-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm2
 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %ecx, %eax
+; AVX512F-32-NEXT: movl %esi, %eax
 ; AVX512F-32-NEXT: shrl $30, %eax
 ; AVX512F-32-NEXT: kmovd %eax, %k1
 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
@@ -2902,7 +2902,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
 ; AVX512F-32-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm1
 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %ecx, %eax
+; AVX512F-32-NEXT: movl %esi, %eax
 ; AVX512F-32-NEXT: shrl $31, %eax
 ; AVX512F-32-NEXT: kmovd %eax, %k1
 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
@@ -546,7 +546,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-SSSE3-NEXT: pslld $31, %xmm0
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -722,7 +722,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-SSSE3-NEXT: psllw $15, %xmm0
 ; SSE2-SSSE3-NEXT: psraw $15, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -1753,7 +1753,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
 ; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-SSSE3-NEXT: pslld $31, %xmm0
 ; SSE2-SSSE3-NEXT: psrad $31, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -2103,7 +2103,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-SSSE3-NEXT: psllw $15, %xmm0
 ; SSE2-SSSE3-NEXT: psraw $15, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -649,7 +649,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -808,7 +808,7 @@ define <16 x i16> @ext_i16_16i16(i16 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1]
 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -1667,7 +1667,7 @@ define <16 x i32> @ext_i16_16i32(i16 %a0) {
 ; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -2008,7 +2008,7 @@ define <32 x i16> @ext_i32_32i16(i32 %a0) {
 ; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
 ; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1]
 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
 ; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -38,7 +38,7 @@ define <4 x float> @test_negative_zero_1(<4 x float> %A) {
 ; SSE2-LABEL: test_negative_zero_1:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSE2-NEXT: xorps %xmm2, %xmm2
@ -231,8 +231,8 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
; SSE-NEXT: cvtss2sd %xmm2, %xmm4 ; SSE-NEXT: cvtss2sd %xmm2, %xmm4
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3] ; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; SSE-NEXT: movaps %xmm2, %xmm6 ; SSE-NEXT: movaps %xmm2, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm2[1],xmm6[1] ; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movaps {{.*#+}} xmm7 ; SSE-NEXT: movaps {{.*#+}} xmm7
; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm7, %xmm2 ; SSE-NEXT: andps %xmm7, %xmm2
@ -247,7 +247,7 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
; SSE-NEXT: orps %xmm0, %xmm4 ; SSE-NEXT: orps %xmm0, %xmm4
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm4[0] ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSE-NEXT: movaps %xmm1, %xmm0 ; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1] ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: andps %xmm7, %xmm0 ; SSE-NEXT: andps %xmm7, %xmm0
; SSE-NEXT: cvtss2sd %xmm3, %xmm3 ; SSE-NEXT: cvtss2sd %xmm3, %xmm3
; SSE-NEXT: andps %xmm8, %xmm3 ; SSE-NEXT: andps %xmm8, %xmm3
@ -294,7 +294,7 @@ define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x doubl
; SSE-NEXT: orps %xmm6, %xmm1 ; SSE-NEXT: orps %xmm6, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm3, %xmm1 ; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1] ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: andps %xmm5, %xmm1 ; SSE-NEXT: andps %xmm5, %xmm1
; SSE-NEXT: xorps %xmm6, %xmm6 ; SSE-NEXT: xorps %xmm6, %xmm6
; SSE-NEXT: cvtsd2ss %xmm2, %xmm6 ; SSE-NEXT: cvtsd2ss %xmm2, %xmm6
@ -14,7 +14,7 @@ define <2 x float> @complex_square_f32(<2 x float>) #0 {
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: addss %xmm0, %xmm2 ; SSE-NEXT: addss %xmm2, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2 ; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm0, %xmm0 ; SSE-NEXT: mulss %xmm0, %xmm0
; SSE-NEXT: mulss %xmm1, %xmm1 ; SSE-NEXT: mulss %xmm1, %xmm1
@ -58,9 +58,9 @@ define <2 x double> @complex_square_f64(<2 x double>) #0 {
; SSE-LABEL: complex_square_f64: ; SSE-LABEL: complex_square_f64:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: addsd %xmm0, %xmm2 ; SSE-NEXT: addsd %xmm2, %xmm2
; SSE-NEXT: mulsd %xmm1, %xmm2 ; SSE-NEXT: mulsd %xmm1, %xmm2
; SSE-NEXT: mulsd %xmm0, %xmm0 ; SSE-NEXT: mulsd %xmm0, %xmm0
; SSE-NEXT: mulsd %xmm1, %xmm1 ; SSE-NEXT: mulsd %xmm1, %xmm1
@ -161,9 +161,9 @@ define <2 x double> @complex_mul_f64(<2 x double>, <2 x double>) #0 {
; SSE-LABEL: complex_mul_f64: ; SSE-LABEL: complex_mul_f64:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: movaps %xmm1, %xmm3 ; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1] ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: mulsd %xmm0, %xmm4 ; SSE-NEXT: mulsd %xmm0, %xmm4
; SSE-NEXT: mulsd %xmm1, %xmm0 ; SSE-NEXT: mulsd %xmm1, %xmm0
@ -318,7 +318,7 @@ define i64 @PR23590(i64 %x) nounwind {
; X64: # BB#0: # %entry ; X64: # BB#0: # %entry
; X64-NEXT: movq %rdi, %rcx ; X64-NEXT: movq %rdi, %rcx
; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F ; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F
; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %rdx ; X64-NEXT: mulq %rdx
; X64-NEXT: shrq $12, %rdx ; X64-NEXT: shrq $12, %rdx
; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039 ; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039
@ -18,7 +18,7 @@ declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)
; CHECK-LABEL: @test_fmaxf ; CHECK-LABEL: @test_fmaxf
; SSE: movaps %xmm0, %xmm2 ; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1 ; SSE-NEXT: maxss %xmm0, %xmm1
@ -47,7 +47,7 @@ define float @test_fmaxf_minsize(float %x, float %y) minsize {
; CHECK-LABEL: @test_fmax ; CHECK-LABEL: @test_fmax
; SSE: movapd %xmm0, %xmm2 ; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1 ; SSE-NEXT: maxsd %xmm0, %xmm1
@ -74,7 +74,7 @@ define x86_fp80 @test_fmaxl(x86_fp80 %x, x86_fp80 %y) {
; CHECK-LABEL: @test_intrinsic_fmaxf ; CHECK-LABEL: @test_intrinsic_fmaxf
; SSE: movaps %xmm0, %xmm2 ; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1 ; SSE-NEXT: maxss %xmm0, %xmm1
@ -95,7 +95,7 @@ define float @test_intrinsic_fmaxf(float %x, float %y) {
; CHECK-LABEL: @test_intrinsic_fmax ; CHECK-LABEL: @test_intrinsic_fmax
; SSE: movapd %xmm0, %xmm2 ; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1 ; SSE-NEXT: maxsd %xmm0, %xmm1
@ -18,7 +18,7 @@ declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)
; CHECK-LABEL: @test_fminf ; CHECK-LABEL: @test_fminf
; SSE: movaps %xmm0, %xmm2 ; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1 ; SSE-NEXT: minss %xmm0, %xmm1
@ -40,7 +40,7 @@ define float @test_fminf(float %x, float %y) {
; CHECK-LABEL: @test_fmin ; CHECK-LABEL: @test_fmin
; SSE: movapd %xmm0, %xmm2 ; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1 ; SSE-NEXT: minsd %xmm0, %xmm1
@ -67,7 +67,7 @@ define x86_fp80 @test_fminl(x86_fp80 %x, x86_fp80 %y) {
; CHECK-LABEL: @test_intrinsic_fminf ; CHECK-LABEL: @test_intrinsic_fminf
; SSE: movaps %xmm0, %xmm2 ; SSE: movaps %xmm0, %xmm2
; SSE-NEXT: cmpunordss %xmm0, %xmm2 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3 ; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1 ; SSE-NEXT: minss %xmm0, %xmm1
@ -87,7 +87,7 @@ define float @test_intrinsic_fminf(float %x, float %y) {
; CHECK-LABEL: @test_intrinsic_fmin ; CHECK-LABEL: @test_intrinsic_fmin
; SSE: movapd %xmm0, %xmm2 ; SSE: movapd %xmm0, %xmm2
; SSE-NEXT: cmpunordsd %xmm0, %xmm2 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3 ; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3 ; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1 ; SSE-NEXT: minsd %xmm0, %xmm1
@ -227,7 +227,7 @@ define fp128 @TestI128_4(fp128 %x) #0 {
; CHECK: # BB#0: # %entry ; CHECK: # BB#0: # %entry
; CHECK-NEXT: subq $40, %rsp ; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp) ; CHECK-NEXT: movq $0, (%rsp)
@ -275,7 +275,7 @@ define fp128 @acosl(fp128 %x) #0 {
; CHECK: # BB#0: # %entry ; CHECK: # BB#0: # %entry
; CHECK-NEXT: subq $40, %rsp ; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp) ; CHECK-NEXT: movq $0, (%rsp)
@ -908,16 +908,16 @@ define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: not_a_hsub_2: ; SSE-LABEL: not_a_hsub_2:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: subss %xmm3, %xmm2 ; SSE-NEXT: subss %xmm3, %xmm2
; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3] ; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT: subss %xmm3, %xmm0 ; SSE-NEXT: subss %xmm3, %xmm0
; SSE-NEXT: movaps %xmm1, %xmm3 ; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movaps %xmm1, %xmm4 ; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1] ; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: subss %xmm4, %xmm3 ; SSE-NEXT: subss %xmm4, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3] ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE-NEXT: subss %xmm4, %xmm1 ; SSE-NEXT: subss %xmm4, %xmm1
@ -965,10 +965,10 @@ define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
; SSE-LABEL: not_a_hsub_3: ; SSE-LABEL: not_a_hsub_3:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movaps %xmm1, %xmm2 ; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1] ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: subsd %xmm2, %xmm1 ; SSE-NEXT: subsd %xmm2, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: subsd %xmm0, %xmm2 ; SSE-NEXT: subsd %xmm0, %xmm2
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movapd %xmm2, %xmm0 ; SSE-NEXT: movapd %xmm2, %xmm0
@ -103,7 +103,7 @@ define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: test5_undef: ; SSE-LABEL: test5_undef:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1] ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm0, %xmm1 ; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0 ; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq ; SSE-NEXT: retq
@ -168,7 +168,7 @@ define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm1 ; SSE-NEXT: addss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3] ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: addss %xmm2, %xmm0 ; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@ -386,7 +386,7 @@ define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-NEXT: pushq %rbx ; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $48, %rsp ; CHECK-LIBCALL-NEXT: subq $48, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi ; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill ; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi ; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi
@ -472,7 +472,7 @@ define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-NEXT: pushq %rbx ; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $16, %rsp ; CHECK-LIBCALL-NEXT: subq $16, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
; CHECK-LIBCALL-NEXT: movzwl 4(%rdi), %edi ; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill ; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi ; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi
@ -657,7 +657,7 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill ; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp ; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp
; CHECK-I686-NEXT: movaps %xmm0, %xmm1 ; CHECK-I686-NEXT: movaps %xmm0, %xmm1
; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3] ; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; CHECK-I686-NEXT: movss %xmm1, (%esp) ; CHECK-I686-NEXT: movss %xmm1, (%esp)
; CHECK-I686-NEXT: calll __gnu_f2h_ieee ; CHECK-I686-NEXT: calll __gnu_f2h_ieee
; CHECK-I686-NEXT: movw %ax, %si ; CHECK-I686-NEXT: movw %ax, %si
@ -162,7 +162,6 @@ define void @testPR4459(x86_fp80 %a) {
; CHECK-NEXT: fstpt (%esp) ; CHECK-NEXT: fstpt (%esp)
; CHECK-NEXT: calll _ceil ; CHECK-NEXT: calll _ceil
; CHECK-NEXT: fld %st(0) ; CHECK-NEXT: fld %st(0)
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start ; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fistpl %st(0) ; CHECK-NEXT: fistpl %st(0)
; CHECK-NEXT: ## InlineAsm End ; CHECK-NEXT: ## InlineAsm End
@ -24,7 +24,7 @@ define void @bar(i32 %X) {
call void @foo() call void @foo()
; CHECK-LABEL: bar: ; CHECK-LABEL: bar:
; CHECK: callq foo ; CHECK: callq foo
; CHECK-NEXT: movl %edi, %r15d ; CHECK-NEXT: movl %eax, %r15d
call void asm sideeffect "movl $0, %r12d", "{r15}~{r12}"(i32 %X) call void asm sideeffect "movl $0, %r12d", "{r15}~{r12}"(i32 %X)
ret void ret void
} }
@ -27,7 +27,7 @@ define void @print_framealloc_from_fp(i8* %fp) {
; X64-LABEL: print_framealloc_from_fp: ; X64-LABEL: print_framealloc_from_fp:
; X64: movq %rcx, %[[parent_fp:[a-z]+]] ; X64: movq %rcx, %[[parent_fp:[a-z]+]]
; X64: movl .Lalloc_func$frame_escape_0(%rcx), %edx ; X64: movl .Lalloc_func$frame_escape_0(%[[parent_fp]]), %edx
; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]] ; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]]
; X64: movq %[[str]], %rcx ; X64: movq %[[str]], %rcx
; X64: callq printf ; X64: callq printf
@ -159,7 +159,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %esi ; X32-NEXT: pushl %esi
; X32-NEXT: movl %esi, %ebx ; X32-NEXT: movl %esi, %ebx
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl %edi ; X32-NEXT: pushl %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@ -752,7 +752,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi ; X32-NEXT: pushl %edi
; X32-NEXT: movl %ebx, %esi ; X32-NEXT: movl %ebx, %esi
; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@ -898,6 +898,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi ; X32-NEXT: pushl %edi
; X32-NEXT: movl %edi, %ebx
; X32-NEXT: pushl %esi ; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
@ -909,7 +910,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi ; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi ; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
@ -1364,7 +1365,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
; X32-NEXT: movl %edi, %ebx ; X32-NEXT: movl %edi, %ebx
; X32-NEXT: pushl %edi ; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: pushl %esi ; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0 ; X32-NEXT: pushl $0
@ -2441,7 +2442,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %edi, %eax ; X32-NEXT: adcl %edi, %eax
; X32-NEXT: movl %eax, %esi ; X32-NEXT: movl %eax, %esi
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill ; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
@ -4264,6 +4265,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %rbp ; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %rcx, %rbx ; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: adcq %rdi, %rbp ; X64-NEXT: adcq %rdi, %rbp
; X64-NEXT: setb %bl ; X64-NEXT: setb %bl
; X64-NEXT: movzbl %bl, %ebx ; X64-NEXT: movzbl %bl, %ebx
@ -4273,12 +4275,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %r8 ; X64-NEXT: mulq %r8
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rcx, %r12 ; X64-NEXT: movq %r11, %r12
; X64-NEXT: movq %rcx, %r8 ; X64-NEXT: movq %r11, %r8
; X64-NEXT: addq %rax, %r12 ; X64-NEXT: addq %rax, %r12
; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq %rdi, %r9 ; X64-NEXT: movq %rdi, %r9
; X64-NEXT: movq %rdi, (%rsp) # 8-byte Spill ; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rbp, %r12 ; X64-NEXT: addq %rbp, %r12
; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
@ -4307,7 +4309,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rdx, %rbx ; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: movq 16(%rsi), %rax ; X64-NEXT: movq 16(%rsi), %rax
; X64-NEXT: movq %rsi, %r13 ; X64-NEXT: movq %rsi, %r13
; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r11 ; X64-NEXT: mulq %r11
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
@ -4320,7 +4322,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rbx, %r11 ; X64-NEXT: adcq %rbx, %r11
; X64-NEXT: movq %r8, %rax ; X64-NEXT: movq %r8, %rax
; X64-NEXT: movq %r8, %rbp ; X64-NEXT: movq %r8, %rbp
; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %rdi, %rax ; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %r9, %rax ; X64-NEXT: movq %r9, %rax
; X64-NEXT: adcq %rcx, %rax ; X64-NEXT: adcq %rcx, %rax
@ -4332,7 +4334,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rdi, %rax ; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %rdx, %rax ; X64-NEXT: movq %rdi, %r9
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: adcq %rcx, %rax ; X64-NEXT: adcq %rcx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq 32(%r13), %rax ; X64-NEXT: movq 32(%r13), %rax
@ -4348,10 +4351,9 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rbp, %rax ; X64-NEXT: movq %rbp, %rax
; X64-NEXT: addq %rdi, %rax ; X64-NEXT: addq %r9, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdi, %r9 ; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %r15, %rax ; X64-NEXT: adcq %r15, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
@ -4369,7 +4371,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rsi, %r11 ; X64-NEXT: addq %rsi, %r11
; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp ; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %rbx, %r11 ; X64-NEXT: addq %rcx, %r11
; X64-NEXT: adcq %rsi, %rbp ; X64-NEXT: adcq %rsi, %rbp
; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: setb %bl ; X64-NEXT: setb %bl
@ -4390,11 +4392,11 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rbx, %r10 ; X64-NEXT: adcq %rbx, %r10
; X64-NEXT: movq %rcx, %rdx ; X64-NEXT: movq %rcx, %rdx
; X64-NEXT: movq %rcx, %r12 ; X64-NEXT: movq %rcx, %r12
; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r9, %rdx ; X64-NEXT: addq %r9, %rdx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r11, %r8 ; X64-NEXT: movq %r11, %r8
; X64-NEXT: adcq %r11, %r15 ; X64-NEXT: adcq %r8, %r15
; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rax, %r14 ; X64-NEXT: adcq %rax, %r14
; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
@ -4490,12 +4492,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rdx, %r12 ; X64-NEXT: adcq %rdx, %r12
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r10 ; X64-NEXT: movq %r10, %rbp
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r10 ; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rsi, %rbx ; X64-NEXT: addq %rsi, %rbx
@ -4522,7 +4525,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %r15 ; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %r12 ; X64-NEXT: adcq $0, %r12
; X64-NEXT: movq %r10, %rbx ; X64-NEXT: movq %r10, %rbx
; X64-NEXT: movq %r10, %rax ; X64-NEXT: movq %rbx, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
; X64-NEXT: mulq %r11 ; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rdx, %rcx
@ -4539,7 +4542,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rbx, %rax ; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %rcx ; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %rbx ; X64-NEXT: movq %rcx, %rbx
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r8 ; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbp, %r8 ; X64-NEXT: addq %rbp, %r8
@ -4570,7 +4573,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi ; X64-NEXT: movq %r11, %rsi
; X64-NEXT: mulq %r11 ; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r13 ; X64-NEXT: movq %rax, %r13
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
@ -4650,12 +4653,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rdx, %r10 ; X64-NEXT: adcq %rdx, %r10
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r11 ; X64-NEXT: movq %r11, %rbp
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax ; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %r11 ; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rdi, %rbx ; X64-NEXT: addq %rdi, %rbx
@ -4785,7 +4789,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r14 ; X64-NEXT: movq %rax, %r14
; X64-NEXT: movq %r8, %rbp ; X64-NEXT: movq %r8, %rbp
; X64-NEXT: movq %r8, %rax ; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %rcx ; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rdx, %rbx
@ -4845,7 +4849,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %r9 ; X64-NEXT: adcq $0, %r9
; X64-NEXT: adcq $0, %r10 ; X64-NEXT: adcq $0, %r10
; X64-NEXT: movq %rbp, %rsi ; X64-NEXT: movq %rbp, %rsi
; X64-NEXT: movq %rbp, %rax ; X64-NEXT: movq %rsi, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx ; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq %rdx, %r14
@ -4902,8 +4906,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %r15 ; X64-NEXT: adcq $0, %r15
; X64-NEXT: movq %rbp, %rax ; X64-NEXT: movq %rbp, %rax
; X64-NEXT: movq %r8, %rdi ; X64-NEXT: movq %r8, %rdi
; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r8 ; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %r9 ; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %r8 ; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbx, %r8 ; X64-NEXT: addq %rbx, %r8
@ -4986,12 +4990,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rcx, %r14 ; X64-NEXT: movq %rcx, %r14
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r10 ; X64-NEXT: movq %r10, %rdi
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax ; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %r10 ; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %r11, %rbx ; X64-NEXT: addq %r11, %rbx
@ -5019,7 +5024,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq $0, %r14 ; X64-NEXT: adcq $0, %r14
; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r13, %rax ; X64-NEXT: movq %r13, %rbx
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx ; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r8 ; X64-NEXT: movq %rdx, %r8
@ -5032,7 +5038,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rax, %rcx ; X64-NEXT: movq %rax, %rcx
; X64-NEXT: addq %r8, %rcx ; X64-NEXT: addq %r8, %rcx
; X64-NEXT: adcq $0, %rsi ; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %r13, %rax ; X64-NEXT: movq %rbx, %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
; X64-NEXT: mulq %r13 ; X64-NEXT: mulq %r13
; X64-NEXT: movq %rdx, %rbx ; X64-NEXT: movq %rdx, %rbx
@ -5066,12 +5072,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill ; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
; X64-NEXT: movq %rbx, %rax ; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %r10 ; X64-NEXT: movq %r10, %rsi
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
; X64-NEXT: movq %r8, %rax ; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r10 ; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rdi ; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rcx, %rdi ; X64-NEXT: addq %rcx, %rdi
@ -5147,7 +5154,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %r9, %rax ; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %rcx ; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r10 ; X64-NEXT: movq %rcx, %r10
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %rdi ; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rsi, %rdi ; X64-NEXT: addq %rsi, %rdi
@ -5159,16 +5166,16 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rax, %rbx
; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
; X64-NEXT: addq %rax, %r12 ; X64-NEXT: addq %rbx, %r12
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
; X64-NEXT: adcq %rdx, %r15 ; X64-NEXT: adcq %r14, %r15
; X64-NEXT: addq %rdi, %r12 ; X64-NEXT: addq %rdi, %r12
; X64-NEXT: adcq %rcx, %r15 ; X64-NEXT: adcq %rcx, %r15
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi ; X64-NEXT: movq %r11, %rsi
; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r11 ; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
@ -5232,7 +5239,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r9 ; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: addq %rax, %rbp ; X64-NEXT: addq %r9, %rbp
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %rdx, %rax ; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rsi, %rbp ; X64-NEXT: addq %rsi, %rbp
@ -5410,7 +5417,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq 88(%rsi), %rax ; X64-NEXT: movq 88(%rsi), %rax
; X64-NEXT: movq %rsi, %r9 ; X64-NEXT: movq %rsi, %r9
; X64-NEXT: movq %rax, %rsi ; X64-NEXT: movq %rax, %rsi
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rcx ; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r11 ; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rdx, %rbp
@ -5446,12 +5453,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %r8, %r10 ; X64-NEXT: adcq %r8, %r10
; X64-NEXT: addq %rbx, %rsi ; X64-NEXT: addq %rbx, %rsi
; X64-NEXT: adcq %rbp, %r10 ; X64-NEXT: adcq %rbp, %r10
; X64-NEXT: movq 64(%r9), %r13 ; X64-NEXT: movq %r9, %rdi
; X64-NEXT: movq 64(%rdi), %r13
; X64-NEXT: movq %r13, %rax ; X64-NEXT: movq %r13, %rax
; X64-NEXT: mulq %r11 ; X64-NEXT: mulq %r11
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq 72(%r9), %r9 ; X64-NEXT: movq 72(%rdi), %r9
; X64-NEXT: movq %r9, %rax ; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r11 ; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rdx, %rbp
@ -5479,8 +5487,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdx, %r11 ; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r15 ; X64-NEXT: movq %rax, %r15
; X64-NEXT: movq %r12, %rcx ; X64-NEXT: movq %r12, %rcx
; X64-NEXT: addq %rax, %rcx ; X64-NEXT: addq %r15, %rcx
; X64-NEXT: adcq %rdx, %r8 ; X64-NEXT: adcq %r11, %r8
; X64-NEXT: addq %rbp, %rcx ; X64-NEXT: addq %rbp, %rcx
; X64-NEXT: adcq %rbx, %r8 ; X64-NEXT: adcq %rbx, %r8
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
@ -5532,13 +5540,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: setb %r10b ; X64-NEXT: setb %r10b
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax ; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %r8 ; X64-NEXT: movq %r8, %rdi
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rcx ; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r9 ; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: movq %rbp, %rax ; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %r8 ; X64-NEXT: mulq %rdi
; X64-NEXT: movq %r8, %r12 ; X64-NEXT: movq %rdi, %r12
; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbx ; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rcx, %rbx ; X64-NEXT: addq %rcx, %rbx
@ -5577,7 +5586,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: imulq %rcx, %rdi ; X64-NEXT: imulq %rcx, %rdi
; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r12, %rsi ; X64-NEXT: movq %r12, %rsi
; X64-NEXT: mulq %r12 ; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rax, %r9 ; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rdi, %rdx ; X64-NEXT: addq %rdi, %rdx
; X64-NEXT: movq 104(%rbp), %r8 ; X64-NEXT: movq 104(%rbp), %r8
@ -909,7 +909,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: movq 8(%rsi), %rbp ; X64-NEXT: movq 8(%rsi), %rbp
; X64-NEXT: movq %r15, %rax ; X64-NEXT: movq %r15, %rax
; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: mulq %rdx ; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r9 ; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %r8 ; X64-NEXT: movq %rax, %r8
; X64-NEXT: movq %r11, %rax ; X64-NEXT: movq %r11, %rax
@ -932,24 +932,23 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: movq %r11, %rax ; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %rbp ; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rbp, %r14 ; X64-NEXT: movq %rbp, %r14
; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rsi ; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp ; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rcx, %rbp ; X64-NEXT: addq %rcx, %rbp
; X64-NEXT: adcq %rbx, %rsi ; X64-NEXT: adcq %rbx, %rsi
; X64-NEXT: xorl %ecx, %ecx ; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: movq %r10, %rbx ; X64-NEXT: movq %r10, %rbx
; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r10, %rax ; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %rcx ; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r13 ; X64-NEXT: movq %rdx, %r13
; X64-NEXT: movq %rax, %r10 ; X64-NEXT: movq %rax, %r10
; X64-NEXT: movq %r15, %rax ; X64-NEXT: movq %r15, %rax
; X64-NEXT: mulq %rcx ; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: # kill: %RAX<kill>
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r15 ; X64-NEXT: movq %rax, %r15
; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r10, %r15 ; X64-NEXT: addq %r10, %r15
; X64-NEXT: adcq %r13, %rdx ; X64-NEXT: adcq %r13, %rdx
; X64-NEXT: addq %rbp, %r15 ; X64-NEXT: addq %rbp, %r15
@ -988,8 +987,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: mulq %rdx ; X64-NEXT: mulq %rdx
; X64-NEXT: movq %rdx, %r14 ; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rax, %r11 ; X64-NEXT: movq %rax, %r11
; X64-NEXT: addq %rax, %r10 ; X64-NEXT: addq %r11, %r10
; X64-NEXT: adcq %rdx, %r13 ; X64-NEXT: adcq %r14, %r13
; X64-NEXT: addq %rbp, %r10 ; X64-NEXT: addq %rbp, %r10
; X64-NEXT: adcq %rsi, %r13 ; X64-NEXT: adcq %rsi, %r13
; X64-NEXT: addq %r8, %r10 ; X64-NEXT: addq %r8, %r10
@ -1001,7 +1000,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: movq 16(%rsi), %r8 ; X64-NEXT: movq 16(%rsi), %r8
; X64-NEXT: movq %rcx, %rax ; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %rcx, %r9 ; X64-NEXT: movq %rcx, %r9
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r8 ; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rdi ; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %r12 ; X64-NEXT: movq %rax, %r12
@ -1032,7 +1031,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: mulq %rcx ; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %rbp ; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rax, %r11 ; X64-NEXT: addq %rbp, %r11
; X64-NEXT: adcq %rdx, %r14 ; X64-NEXT: adcq %rdx, %r14
; X64-NEXT: addq %r9, %r11 ; X64-NEXT: addq %r9, %r11
; X64-NEXT: adcq %rbx, %r14 ; X64-NEXT: adcq %rbx, %r14
@ -7,7 +7,7 @@ define i128 @foo(i128 %t, i128 %u) {
; X64-NEXT: movq %rdx, %r8 ; X64-NEXT: movq %rdx, %r8
; X64-NEXT: imulq %rdi, %rcx ; X64-NEXT: imulq %rdi, %rcx
; X64-NEXT: movq %rdi, %rax ; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %rdx ; X64-NEXT: mulq %r8
; X64-NEXT: addq %rcx, %rdx ; X64-NEXT: addq %rcx, %rdx
; X64-NEXT: imulq %r8, %rsi ; X64-NEXT: imulq %r8, %rsi
; X64-NEXT: addq %rsi, %rdx ; X64-NEXT: addq %rsi, %rdx
@ -9,7 +9,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE2-LABEL: mul_v16i8c: ; SSE2-LABEL: mul_v16i8c:
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm2, %xmm1 ; SSE2-NEXT: pmullw %xmm2, %xmm1
@ -143,10 +143,10 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE2-LABEL: mul_v16i8: ; SSE2-LABEL: mul_v16i8:
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm3 ; SSE2-NEXT: psraw $8, %xmm3
; SSE2-NEXT: pmullw %xmm2, %xmm3 ; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@ -386,7 +386,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-LABEL: mul_v32i8c: ; SSE2-LABEL: mul_v32i8c:
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: pmullw %xmm3, %xmm2
@ -398,7 +398,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-NEXT: pand %xmm4, %xmm0 ; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm2, %xmm0 ; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2 ; SSE2-NEXT: pand %xmm4, %xmm2
@ -567,10 +567,10 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-LABEL: mul_v32i8: ; SSE2-LABEL: mul_v32i8:
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5 ; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
@ -583,10 +583,10 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-NEXT: pand %xmm4, %xmm0 ; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm5, %xmm0 ; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm2 ; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm5 ; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm2, %xmm5 ; SSE2-NEXT: pmullw %xmm2, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5 ; SSE2-NEXT: pand %xmm4, %xmm5
@ -774,7 +774,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-LABEL: mul_v64i8c: ; SSE2-LABEL: mul_v64i8c:
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm6 ; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117] ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6
@ -786,7 +786,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm0 ; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: packuswb %xmm6, %xmm0 ; SSE2-NEXT: packuswb %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm6 ; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6
@ -796,7 +796,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm1 ; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: packuswb %xmm6, %xmm1 ; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm6 ; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6
@ -806,7 +806,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm2 ; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: packuswb %xmm6, %xmm2 ; SSE2-NEXT: packuswb %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm6 ; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm6 ; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6
@ -821,7 +821,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE41: # BB#0: # %entry ; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm4 ; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0 ; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117] ; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm6, %xmm0 ; SSE41-NEXT: pmullw %xmm6, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] ; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
@ -939,10 +939,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-LABEL: mul_v64i8: ; SSE2-LABEL: mul_v64i8:
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm4, %xmm8 ; SSE2-NEXT: movdqa %xmm4, %xmm8
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm8 ; SSE2-NEXT: psraw $8, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm9 ; SSE2-NEXT: movdqa %xmm0, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm9 ; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: pmullw %xmm8, %xmm9 ; SSE2-NEXT: pmullw %xmm8, %xmm9
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
@ -955,10 +955,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: packuswb %xmm9, %xmm0 ; SSE2-NEXT: packuswb %xmm9, %xmm0
; SSE2-NEXT: movdqa %xmm5, %xmm9 ; SSE2-NEXT: movdqa %xmm5, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm9 ; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: pmullw %xmm9, %xmm4 ; SSE2-NEXT: pmullw %xmm9, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm4
@ -970,10 +970,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-NEXT: pand %xmm8, %xmm1 ; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm4, %xmm1 ; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm6, %xmm4 ; SSE2-NEXT: movdqa %xmm6, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5 ; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5 ; SSE2-NEXT: pand %xmm8, %xmm5
@ -985,10 +985,10 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-NEXT: pand %xmm8, %xmm2 ; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: packuswb %xmm5, %xmm2 ; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: movdqa %xmm7, %xmm4 ; SSE2-NEXT: movdqa %xmm7, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm4 ; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5 ; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm5 ; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5 ; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5 ; SSE2-NEXT: pand %xmm8, %xmm5
@ -1006,7 +1006,7 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE41-NEXT: movdqa %xmm1, %xmm8 ; SSE41-NEXT: movdqa %xmm1, %xmm8
; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm4, %xmm9 ; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0 ; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
; SSE41-NEXT: pmullw %xmm9, %xmm0 ; SSE41-NEXT: pmullw %xmm9, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255] ; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm9, %xmm0 ; SSE41-NEXT: pand %xmm9, %xmm0
@ -5,7 +5,7 @@ define double @pow_wrapper(double %a) nounwind readonly ssp noredzone {
; CHECK-LABEL: pow_wrapper: ; CHECK-LABEL: pow_wrapper:
; CHECK: # BB#0: ; CHECK: # BB#0:
; CHECK-NEXT: movapd %xmm0, %xmm1 ; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: mulsd %xmm0, %xmm1 ; CHECK-NEXT: mulsd %xmm1, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0 ; CHECK-NEXT: mulsd %xmm1, %xmm0
; CHECK-NEXT: mulsd %xmm1, %xmm1 ; CHECK-NEXT: mulsd %xmm1, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0 ; CHECK-NEXT: mulsd %xmm1, %xmm0
@ -25,7 +25,7 @@ define <3 x double> @v3f2d_ext_vec(<3 x float> %v1) nounwind {
; SSE-NEXT: cvtps2pd %xmm0, %xmm0 ; SSE-NEXT: cvtps2pd %xmm0, %xmm0
; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm1 ; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1] ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: fldl -{{[0-9]+}}(%rsp) ; SSE-NEXT: fldl -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm0 ; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq ; SSE-NEXT: retq
@ -49,16 +49,16 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3],xmm9[3] ; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3],xmm9[3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0] ; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0]
; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2 ; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2
; CHECK-NEXT: vmovaps %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill ; CHECK-NEXT: vmovaps %xmm15, %xmm1
; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm9 ; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9
; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0 ; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0
; CHECK-NEXT: vaddps %xmm15, %xmm15, %xmm8 ; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8
; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3 ; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3
; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0 ; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm0 ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp) ; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %xmm9, (%rsp) ; CHECK-NEXT: vmovaps %xmm9, (%rsp)
; CHECK-NEXT: vmovaps %xmm15, %xmm1
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
; CHECK-NEXT: vzeroupper ; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq foo ; CHECK-NEXT: callq foo
@ -638,7 +638,7 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; SSE41-LABEL: test14: ; SSE41-LABEL: test14:
; SSE41: ## BB#0: ## %vector.ph ; SSE41: ## BB#0: ## %vector.ph
; SSE41-NEXT: movdqa %xmm0, %xmm5 ; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,0,1] ; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,0,1]
@ -23,7 +23,8 @@ define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
; MCU-NEXT: jne .LBB0_1 ; MCU-NEXT: jne .LBB0_1
; MCU-NEXT: # BB#2: ; MCU-NEXT: # BB#2:
; MCU-NEXT: addl $8, %edx ; MCU-NEXT: addl $8, %edx
; MCU-NEXT: movl (%edx), %eax ; MCU-NEXT: movl %edx, %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: retl ; MCU-NEXT: retl
; MCU-NEXT: .LBB0_1: ; MCU-NEXT: .LBB0_1:
; MCU-NEXT: addl $8, %eax ; MCU-NEXT: addl $8, %eax
@ -61,7 +61,7 @@ false:
; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue ; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue
; CHECK: movl %ecx, %eax ; CHECK: movl %ecx, %eax
; CHECK: cmpl %edx, %ecx ; CHECK: cmpl %edx, %eax
; CHECK: jge LBB1_2 ; CHECK: jge LBB1_2
; CHECK: pushl %eax ; CHECK: pushl %eax
; CHECK: movl $4092, %eax ; CHECK: movl $4092, %eax
@ -132,7 +132,7 @@ define float @f32_estimate(float %x) #1 {
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: rsqrtss %xmm0, %xmm1 ; SSE-NEXT: rsqrtss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2 ; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2 ; SSE-NEXT: mulss %xmm2, %xmm2
; SSE-NEXT: mulss %xmm0, %xmm2 ; SSE-NEXT: mulss %xmm0, %xmm2
; SSE-NEXT: addss {{.*}}(%rip), %xmm2 ; SSE-NEXT: addss {{.*}}(%rip), %xmm2
; SSE-NEXT: mulss {{.*}}(%rip), %xmm1 ; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
@ -178,7 +178,7 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: rsqrtps %xmm0, %xmm1 ; SSE-NEXT: rsqrtps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2 ; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: mulps %xmm1, %xmm2 ; SSE-NEXT: mulps %xmm2, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2 ; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: addps {{.*}}(%rip), %xmm2 ; SSE-NEXT: addps {{.*}}(%rip), %xmm2
; SSE-NEXT: mulps {{.*}}(%rip), %xmm1 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
@ -228,7 +228,7 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
; SSE-NEXT: rsqrtps %xmm0, %xmm3 ; SSE-NEXT: rsqrtps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01] ; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
; SSE-NEXT: movaps %xmm3, %xmm2 ; SSE-NEXT: movaps %xmm3, %xmm2
; SSE-NEXT: mulps %xmm3, %xmm2 ; SSE-NEXT: mulps %xmm2, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2 ; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00] ; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
; SSE-NEXT: addps %xmm0, %xmm2 ; SSE-NEXT: addps %xmm0, %xmm2
@ -236,7 +236,7 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
; SSE-NEXT: mulps %xmm3, %xmm2 ; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: rsqrtps %xmm1, %xmm5 ; SSE-NEXT: rsqrtps %xmm1, %xmm5
; SSE-NEXT: movaps %xmm5, %xmm3 ; SSE-NEXT: movaps %xmm5, %xmm3
; SSE-NEXT: mulps %xmm5, %xmm3 ; SSE-NEXT: mulps %xmm3, %xmm3
; SSE-NEXT: mulps %xmm1, %xmm3 ; SSE-NEXT: mulps %xmm1, %xmm3
; SSE-NEXT: addps %xmm0, %xmm3 ; SSE-NEXT: addps %xmm0, %xmm3
; SSE-NEXT: mulps %xmm4, %xmm3 ; SSE-NEXT: mulps %xmm4, %xmm3
@ -1084,7 +1084,8 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
; SSE2-NEXT: testb $1, %dil ; SSE2-NEXT: testb $1, %dil
; SSE2-NEXT: jne .LBB62_1 ; SSE2-NEXT: jne .LBB62_1
; SSE2-NEXT: # BB#2: ; SSE2-NEXT: # BB#2:
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3] ; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; SSE2-NEXT: .LBB62_1: ; SSE2-NEXT: .LBB62_1:
; SSE2-NEXT: addss %xmm0, %xmm1 ; SSE2-NEXT: addss %xmm0, %xmm1
@ -1096,7 +1097,8 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
; SSE41-NEXT: testb $1, %dil ; SSE41-NEXT: testb $1, %dil
; SSE41-NEXT: jne .LBB62_1 ; SSE41-NEXT: jne .LBB62_1
; SSE41-NEXT: # BB#2: ; SSE41-NEXT: # BB#2:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3] ; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; SSE41-NEXT: .LBB62_1: ; SSE41-NEXT: .LBB62_1:
; SSE41-NEXT: addss %xmm0, %xmm1 ; SSE41-NEXT: addss %xmm0, %xmm1
@ -1137,7 +1139,8 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double>
; SSE2-NEXT: testb $1, %dil ; SSE2-NEXT: testb $1, %dil
; SSE2-NEXT: jne .LBB63_1 ; SSE2-NEXT: jne .LBB63_1
; SSE2-NEXT: # BB#2: ; SSE2-NEXT: # BB#2:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1] ; SSE2-NEXT: movapd %xmm2, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; SSE2-NEXT: .LBB63_1: ; SSE2-NEXT: .LBB63_1:
; SSE2-NEXT: addsd %xmm0, %xmm1 ; SSE2-NEXT: addsd %xmm0, %xmm1
@ -1149,7 +1152,8 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double>
; SSE41-NEXT: testb $1, %dil ; SSE41-NEXT: testb $1, %dil
; SSE41-NEXT: jne .LBB63_1 ; SSE41-NEXT: jne .LBB63_1
; SSE41-NEXT: # BB#2: ; SSE41-NEXT: # BB#2:
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1] ; SSE41-NEXT: movapd %xmm2, %xmm1
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; SSE41-NEXT: .LBB63_1: ; SSE41-NEXT: .LBB63_1:
; SSE41-NEXT: addsd %xmm0, %xmm1 ; SSE41-NEXT: addsd %xmm0, %xmm1
@ -16,7 +16,7 @@ define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
; X32-LABEL: test4: ; X32-LABEL: test4:
; X32: # BB#0: # %entry ; X32: # BB#0: # %entry
; X32-NEXT: movaps %xmm0, %xmm2 ; X32-NEXT: movaps %xmm0, %xmm2
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3] ; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X32-NEXT: addss %xmm1, %xmm0 ; X32-NEXT: addss %xmm1, %xmm0
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X32-NEXT: subss %xmm1, %xmm2 ; X32-NEXT: subss %xmm1, %xmm2
@ -26,7 +26,7 @@ define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
; X64-LABEL: test4: ; X64-LABEL: test4:
; X64: # BB#0: # %entry ; X64: # BB#0: # %entry
; X64-NEXT: movaps %xmm0, %xmm2 ; X64-NEXT: movaps %xmm0, %xmm2
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3] ; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X64-NEXT: addss %xmm1, %xmm0 ; X64-NEXT: addss %xmm1, %xmm0
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3] ; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X64-NEXT: subss %xmm1, %xmm2 ; X64-NEXT: subss %xmm1, %xmm2
@ -406,9 +406,9 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm0, %xmm2 ; SSE-NEXT: subss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1] ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: movaps %xmm1, %xmm4 ; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1] ; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: subss %xmm4, %xmm3 ; SSE-NEXT: subss %xmm4, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm4 ; SSE-NEXT: addss %xmm0, %xmm4
@ -126,7 +126,7 @@ define void @test6(i32 %a) gc "statepoint-example" {
; CHECK-NEXT: Lcfi11: ; CHECK-NEXT: Lcfi11:
; CHECK-NEXT: .cfi_offset %rbx, -16 ; CHECK-NEXT: .cfi_offset %rbx, -16
; CHECK-NEXT: movl %edi, %ebx ; CHECK-NEXT: movl %edi, %ebx
; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movl %ebx, {{[0-9]+}}(%rsp)
; CHECK-NEXT: callq _baz ; CHECK-NEXT: callq _baz
; CHECK-NEXT: Ltmp6: ; CHECK-NEXT: Ltmp6:
; CHECK-NEXT: callq _bar ; CHECK-NEXT: callq _bar
@ -153,13 +153,13 @@ entry:
; CHECK: .byte 1 ; CHECK: .byte 1
; CHECK-NEXT: .byte 0 ; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 4 ; CHECK-NEXT: .short 4
; CHECK-NEXT: .short 5 ; CHECK-NEXT: .short 6
; CHECK-NEXT: .short 0 ; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0 ; CHECK-NEXT: .long 0
; CHECK: .byte 1 ; CHECK: .byte 1
; CHECK-NEXT: .byte 0 ; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 4 ; CHECK-NEXT: .short 4
; CHECK-NEXT: .short 4 ; CHECK-NEXT: .short 3
; CHECK-NEXT: .short 0 ; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0 ; CHECK-NEXT: .long 0
; CHECK: Ltmp2-_test2 ; CHECK: Ltmp2-_test2
@ -61,9 +61,9 @@ define i32 @back_to_back_deopt(i32 %a, i32 %b, i32 %c) #1
gc "statepoint-example" { gc "statepoint-example" {
; CHECK-LABEL: back_to_back_deopt ; CHECK-LABEL: back_to_back_deopt
; The exact stores don't matter, but there need to be three stack slots created ; The exact stores don't matter, but there need to be three stack slots created
; CHECK-DAG: movl %edi, 12(%rsp) ; CHECK-DAG: movl %ebx, 12(%rsp)
; CHECK-DAG: movl %esi, 8(%rsp) ; CHECK-DAG: movl %ebp, 8(%rsp)
; CHECK-DAG: movl %edx, 4(%rsp) ; CHECK-DAG: movl %r14d, 4(%rsp)
; CHECK: callq ; CHECK: callq
; CHECK-DAG: movl %ebx, 12(%rsp) ; CHECK-DAG: movl %ebx, 12(%rsp)
; CHECK-DAG: movl %ebp, 8(%rsp) ; CHECK-DAG: movl %ebp, 8(%rsp)
@ -1018,12 +1018,12 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3 ; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@ -1126,12 +1126,12 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2 ; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1 ; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3 ; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@ -1316,11 +1316,11 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i32: ; SSE-LABEL: fptoui_4f32_to_4i32:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax ; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movd %eax, %xmm1 ; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2 ; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1] ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: cvttss2si %xmm2, %rax ; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
@ -1560,7 +1560,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; SSE-NEXT: cvttss2si %xmm0, %rax ; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movd %eax, %xmm0 ; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: movaps %xmm2, %xmm3 ; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm2[1],xmm3[1] ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: cvttss2si %xmm3, %rax ; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3 ; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
@ -1572,11 +1572,11 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT: movaps %xmm1, %xmm2 ; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm2, %rax ; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: movaps %xmm1, %xmm3 ; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1] ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: cvttss2si %xmm3, %rax ; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3 ; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
@ -1687,7 +1687,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cmovaeq %rcx, %rdx ; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx ; SSE-NEXT: cvttss2si %xmm4, %rcx
@ -1698,7 +1698,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx ; SSE-NEXT: cvttss2si %xmm4, %rcx
@ -1865,7 +1865,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: cmovaeq %rcx, %rdx ; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2 ; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx ; SSE-NEXT: cvttss2si %xmm4, %rcx
@ -1876,7 +1876,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-NEXT: movq %rdx, %xmm3 ; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3 ; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3] ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movaps %xmm3, %xmm4 ; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4 ; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx ; SSE-NEXT: cvttss2si %xmm4, %rcx
@ -1611,7 +1611,7 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; SSE-LABEL: uitofp_2i64_to_4f32: ; SSE-LABEL: uitofp_2i64_to_4f32:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1 ; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB39_1 ; SSE-NEXT: js .LBB39_1
; SSE-NEXT: # BB#2: ; SSE-NEXT: # BB#2:
@ -1839,7 +1839,7 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-LABEL: uitofp_4i64_to_4f32_undef: ; SSE-LABEL: uitofp_4i64_to_4f32_undef:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1 ; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movq %xmm0, %rax ; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax ; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB41_1 ; SSE-NEXT: js .LBB41_1
; SSE-NEXT: # BB#2: ; SSE-NEXT: # BB#2:
@ -437,7 +437,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE42: # BB#0: ; SSE42: # BB#0:
; SSE42-NEXT: movdqa %xmm0, %xmm2 ; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm1, %xmm3 ; SSE42-NEXT: movdqa %xmm1, %xmm3
; SSE42-NEXT: pcmpgtq %xmm0, %xmm3 ; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0 ; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm3, %xmm0 ; SSE42-NEXT: pxor %xmm3, %xmm0
; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@ -35,7 +35,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X32: # BB#0: # %entry ; X32: # BB#0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm2 ; X32-NEXT: movdqa %xmm0, %xmm2
; X32-NEXT: psllw $5, %xmm1 ; X32-NEXT: psllw $5, %xmm1
; X32-NEXT: movdqa %xmm0, %xmm3 ; X32-NEXT: movdqa %xmm2, %xmm3
; X32-NEXT: psllw $4, %xmm3 ; X32-NEXT: psllw $4, %xmm3
; X32-NEXT: pand {{\.LCPI.*}}, %xmm3 ; X32-NEXT: pand {{\.LCPI.*}}, %xmm3
; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: movdqa %xmm1, %xmm0
@ -47,7 +47,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X32-NEXT: movdqa %xmm2, %xmm3 ; X32-NEXT: movdqa %xmm2, %xmm3
; X32-NEXT: paddb %xmm2, %xmm3 ; X32-NEXT: paddb %xmm3, %xmm3
; X32-NEXT: paddb %xmm1, %xmm1 ; X32-NEXT: paddb %xmm1, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
@ -58,7 +58,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X64: # BB#0: # %entry ; X64: # BB#0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm2 ; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $5, %xmm1 ; X64-NEXT: psllw $5, %xmm1
; X64-NEXT: movdqa %xmm0, %xmm3 ; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: psllw $4, %xmm3 ; X64-NEXT: psllw $4, %xmm3
; X64-NEXT: pand {{.*}}(%rip), %xmm3 ; X64-NEXT: pand {{.*}}(%rip), %xmm3
; X64-NEXT: movdqa %xmm1, %xmm0 ; X64-NEXT: movdqa %xmm1, %xmm0
@ -70,7 +70,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X64-NEXT: movdqa %xmm1, %xmm0 ; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3 ; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: paddb %xmm2, %xmm3 ; X64-NEXT: paddb %xmm3, %xmm3
; X64-NEXT: paddb %xmm1, %xmm1 ; X64-NEXT: paddb %xmm1, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0 ; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
@ -992,7 +992,7 @@ define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) {
; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrad $31, %xmm1 ; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: pxor %xmm3, %xmm3 ; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: psubd %xmm0, %xmm3 ; SSE41-NEXT: psubd %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3 ; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3
; SSE41-NEXT: movaps %xmm3, %xmm0 ; SSE41-NEXT: movaps %xmm3, %xmm0
@ -176,13 +176,13 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_div7_16i8: ; SSE2-LABEL: test_div7_16i8:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427] ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2 ; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1 ; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1 ; SSE2-NEXT: psrlw $8, %xmm1
@ -482,13 +482,13 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_rem7_16i8: ; SSE2-LABEL: test_rem7_16i8:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427] ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2 ; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1 ; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1 ; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1 ; SSE2-NEXT: psrlw $8, %xmm1
@ -504,7 +504,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: paddb %xmm2, %xmm1 ; SSE2-NEXT: paddb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: pmullw %xmm3, %xmm2
@ -481,7 +481,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2 ; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: pmullw %xmm3, %xmm2
@ -361,7 +361,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm4 ; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: paddw %xmm1, %xmm4 ; SSE41-NEXT: paddw %xmm4, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm6 ; SSE41-NEXT: movdqa %xmm3, %xmm6
; SSE41-NEXT: psllw $8, %xmm6 ; SSE41-NEXT: psllw $8, %xmm6
; SSE41-NEXT: movdqa %xmm3, %xmm5 ; SSE41-NEXT: movdqa %xmm3, %xmm5
@ -386,7 +386,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: psllw $4, %xmm2 ; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: por %xmm0, %xmm2 ; SSE41-NEXT: por %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: paddw %xmm2, %xmm1 ; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm4 ; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4 ; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: movdqa %xmm2, %xmm0
@ -631,10 +631,10 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8] ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE41-NEXT: psubb %xmm3, %xmm2 ; SSE41-NEXT: psubb %xmm3, %xmm2
; SSE41-NEXT: psllw $5, %xmm3 ; SSE41-NEXT: psllw $5, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm5 ; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: psllw $4, %xmm5 ; SSE41-NEXT: psllw $4, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
; SSE41-NEXT: movdqa %xmm0, %xmm4 ; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5 ; SSE41-NEXT: movdqa %xmm4, %xmm5
@ -644,13 +644,13 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5 ; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: paddb %xmm4, %xmm5 ; SSE41-NEXT: paddb %xmm5, %xmm5
; SSE41-NEXT: paddb %xmm3, %xmm3 ; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: psllw $5, %xmm2 ; SSE41-NEXT: psllw $5, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm2, %xmm3 ; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm5 ; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: psrlw $4, %xmm5 ; SSE41-NEXT: psrlw $4, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
@ -1191,7 +1191,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: constant_rotate_v16i8: ; SSE41-LABEL: constant_rotate_v16i8:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psllw $4, %xmm3 ; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256] ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]
@ -1203,7 +1203,7 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm2, %xmm3 ; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm3
@ -243,7 +243,7 @@ define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-LABEL: sext_16i8_to_8i32: ; SSSE3-LABEL: sext_16i8_to_8i32:
; SSSE3: # BB#0: # %entry ; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $24, %xmm0 ; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,4,u,u,u,5,u,u,u,6,u,u,u,7] ; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,4,u,u,u,5,u,u,u,6,u,u,u,7]
@ -312,7 +312,7 @@ define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSSE3-LABEL: sext_16i8_to_16i32: ; SSSE3-LABEL: sext_16i8_to_16i32:
; SSSE3: # BB#0: # %entry ; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $24, %xmm0 ; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15] ; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
@ -443,7 +443,7 @@ define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSSE3-LABEL: sext_16i8_to_4i64: ; SSSE3-LABEL: sext_16i8_to_4i64:
; SSSE3: # BB#0: # %entry ; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: movdqa %xmm0, %xmm2 ; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2 ; SSSE3-NEXT: psrad $31, %xmm2
@ -499,7 +499,7 @@ define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2-LABEL: sext_16i8_to_8i64: ; SSE2-LABEL: sext_16i8_to_8i64:
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2 ; SSE2-NEXT: psrad $31, %xmm2
@ -1112,7 +1112,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad $31, %xmm3 ; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrad $31, %xmm4 ; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
@ -1131,7 +1131,7 @@ define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp
; SSSE3-NEXT: movdqa %xmm1, %xmm2 ; SSSE3-NEXT: movdqa %xmm1, %xmm2
; SSSE3-NEXT: movdqa %xmm0, %xmm3 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3 ; SSSE3-NEXT: psrad $31, %xmm3
; SSSE3-NEXT: movdqa %xmm1, %xmm4 ; SSSE3-NEXT: movdqa %xmm2, %xmm4
; SSSE3-NEXT: psrad $31, %xmm4 ; SSSE3-NEXT: psrad $31, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
@ -2228,7 +2228,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm0 ; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0 ; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@ -2277,7 +2277,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm0 ; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0 ; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@ -3079,7 +3079,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psllw $15, %xmm0 ; SSE2-NEXT: psllw $15, %xmm0
; SSE2-NEXT: psraw $15, %xmm0 ; SSE2-NEXT: psraw $15, %xmm0
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@ -3168,7 +3168,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psllw $15, %xmm0 ; SSSE3-NEXT: psllw $15, %xmm0
; SSSE3-NEXT: psraw $15, %xmm0 ; SSSE3-NEXT: psraw $15, %xmm0
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] ; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@ -274,7 +274,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: paddw %xmm1, %xmm3 ; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psraw $8, %xmm4 ; SSE41-NEXT: psraw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@ -245,7 +245,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: paddw %xmm1, %xmm3 ; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4 ; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@ -407,7 +407,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1 ; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psrlw $4, %xmm3 ; SSE41-NEXT: psrlw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@ -679,7 +679,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: pshufb %xmm0, %xmm1 ; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: psllw $5, %xmm1 ; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm3 ; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psrlw $4, %xmm4 ; SSE41-NEXT: psrlw $4, %xmm4
; SSE41-NEXT: pand {{.*}}(%rip), %xmm4 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
@ -1101,7 +1101,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: constant_shift_v16i8: ; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psrlw $4, %xmm2 ; SSE41-NEXT: psrlw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32] ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
@ -202,7 +202,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE41-NEXT: psllw $4, %xmm1 ; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1 ; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: paddw %xmm1, %xmm3 ; SSE41-NEXT: paddw %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psllw $8, %xmm4 ; SSE41-NEXT: psllw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@ -361,7 +361,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1 ; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psllw $4, %xmm3 ; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@ -373,7 +373,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3 ; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm2, %xmm3 ; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm1 ; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
@ -627,7 +627,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: pshufb %xmm0, %xmm1 ; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: psllw $5, %xmm1 ; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3 ; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm3 ; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4 ; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psllw $4, %xmm4 ; SSE41-NEXT: psllw $4, %xmm4
; SSE41-NEXT: pand {{.*}}(%rip), %xmm4 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
@ -639,7 +639,7 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: paddb %xmm2, %xmm1 ; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: paddb %xmm3, %xmm3 ; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
@ -957,7 +957,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-LABEL: constant_shift_v16i8: ; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: psllw $4, %xmm2 ; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32] ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
@ -968,7 +968,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2 ; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: paddb %xmm1, %xmm2 ; SSE41-NEXT: paddb %xmm2, %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@ -2792,7 +2792,7 @@ define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: PR22377: ; SSE-LABEL: PR22377:
; SSE: # BB#0: # %entry ; SSE: # BB#0: # %entry
; SSE-NEXT: movaps %xmm0, %xmm1 ; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3] ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2] ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
; SSE-NEXT: addps %xmm0, %xmm1 ; SSE-NEXT: addps %xmm0, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@ -5198,7 +5198,7 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
; SSE-LABEL: mul_add_const_v4i64_v4i32: ; SSE-LABEL: mul_add_const_v4i64_v4i32:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2 ; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
@ -246,7 +246,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4 ; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@ -261,7 +261,7 @@ define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ss
; SSSE3: # BB#0: # %entry ; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4 ; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: movdqa %xmm0, %xmm1 ; SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
@ -399,7 +399,7 @@ define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4 ; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm0
@ -700,7 +700,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSE2: # BB#0: # %entry ; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3 ; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4 ; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
@ -715,7 +715,7 @@ define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp
; SSSE3: # BB#0: # %entry ; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4 ; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: movdqa %xmm0, %xmm1 ; SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
@ -1582,7 +1582,7 @@ define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone
; SSE41: # BB#0: # %entry ; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] ; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
@ -1630,7 +1630,7 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
; SSE41: # BB#0: # %entry ; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
@@ -3344,12 +3344,12 @@ define <64 x i8> @test98(<64 x i8> %a, <64 x i8> %b) {
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: movdqa %xmm8, %xmm12
; SSE2-NEXT: pcmpgtb %xmm7, %xmm12
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm9, %xmm14
; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -3487,12 +3487,12 @@ define <64 x i8> @test100(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm12
+; SSE2-NEXT: pcmpgtb %xmm8, %xmm12
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm9, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -4225,12 +4225,12 @@ define <16 x i32> @test114(<16 x i32> %a, <16 x i32> %b) {
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: movdqa %xmm8, %xmm12
; SSE2-NEXT: pcmpgtd %xmm7, %xmm12
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm9, %xmm14
; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -4368,12 +4368,12 @@ define <16 x i32> @test116(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm12
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -4890,7 +4890,7 @@ define <8 x i64> @test122(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test122:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5164,7 +5164,7 @@ define <8 x i64> @test124(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test124:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5467,7 +5467,7 @@ define <8 x i64> @test126(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test126:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -5795,7 +5795,7 @@ define <8 x i64> @test128(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test128:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -6047,7 +6047,7 @@ define <64 x i8> @test130(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm8, %xmm14
; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -6190,7 +6190,7 @@ define <64 x i8> @test132(<64 x i8> %a, <64 x i8> %b) {
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm8, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -6941,7 +6941,7 @@ define <16 x i32> @test146(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: movdqa %xmm8, %xmm14
; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
@@ -7084,7 +7084,7 @@ define <16 x i32> @test148(<16 x i32> %a, <16 x i32> %b) {
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
@@ -7610,7 +7610,7 @@ define <8 x i64> @test154(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test154:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -7882,7 +7882,7 @@ define <8 x i64> @test156(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test156:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -8183,7 +8183,7 @@ define <8 x i64> @test158(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test158:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -8509,7 +8509,7 @@ define <8 x i64> @test160(<8 x i64> %a, <8 x i64> %b) {
; SSE2-LABEL: test160:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
@@ -10289,7 +10289,7 @@ define <2 x i64> @test180(<2 x i64> %a, <2 x i64> %b) {
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -10768,7 +10768,7 @@ define <2 x i64> @test188(<2 x i64> %a, <2 x i64> %b) {
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2

@@ -74,7 +74,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE2-NEXT: movss %xmm0, (%eax)
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm0, 4(%eax)

@@ -19,7 +19,7 @@ define void @convert_v7i16_v7f32(<7 x float>* %dst.addr, <7 x i16> %src) nounwin
; X86-SSE2-NEXT: movups %xmm0, (%eax)
; X86-SSE2-NEXT: movss %xmm2, 16(%eax)
; X86-SSE2-NEXT: movaps %xmm2, %xmm0
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm2[1],xmm0[1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X86-SSE2-NEXT: movss %xmm0, 24(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm2, 20(%eax)
@@ -100,7 +100,7 @@ define void @convert_v3i8_to_v3f32(<3 x float>* %dst.addr, <3 x i8>* %src.addr)
; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE2-NEXT: movss %xmm0, (%eax)
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm0, 4(%eax)

@@ -23,7 +23,7 @@ target triple = "x86_64-apple-macosx"
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.
@@ -69,7 +69,7 @@ attributes #0 = { "no-frame-pointer-elim"="false" }
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
@@ -115,7 +115,7 @@ attributes #1 = { "no-frame-pointer-elim"="true" }
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.

@@ -17,7 +17,7 @@ target triple = "x86_64-apple-macosx"
; Compare the arguments and jump to exit.
; No prologue needed.
; ENABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; ENABLE-NEXT: cmpl %esi, %edi
+; ENABLE-NEXT: cmpl %esi, [[ARG0CPY]]
; ENABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
@@ -27,7 +27,7 @@ target triple = "x86_64-apple-macosx"
; Compare the arguments and jump to exit.
; After the prologue is set.
; DISABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; DISABLE-NEXT: cmpl %esi, %edi
+; DISABLE-NEXT: cmpl %esi, [[ARG0CPY]]
; DISABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.