GlobalISel: restrict G_EXTRACT instruction to just one operand.

A bit more painful than G_INSERT because it was more widely used, but this
should simplify the handling of extract operations in most locations.
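
For example, a 64-bit split that used to be written as one variadic extract:

    %3(s32), %4(s32) = G_EXTRACT %2(s64), 0, 32

is now expressed as one G_EXTRACT per component (these lines are taken from
the updated tests below):

    %3(s32) = G_EXTRACT %2(s64), 0
    %4(s32) = G_EXTRACT %2(s64), 32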

llvm-svn: 297100
Tim Northover 2017-03-06 23:50:28 +00:00
parent ef23e9574e
commit c2c545b8f7
21 changed files with 151 additions and 115 deletions

View File

@@ -440,19 +440,13 @@ public:
   MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
                                  MachineMemOperand &MMO);
 
-  /// Build and insert `Res0<def>, ... = G_EXTRACT Src, Idx0, ...`.
-  ///
-  /// If \p Res[i] has size N bits, G_EXTRACT sets \p Res[i] to bits `[Idxs[i],
-  /// Idxs[i] + N)` of \p Src.
+  /// Build and insert `Res0<def>, ... = G_EXTRACT Src, Idx0`.
   ///
   /// \pre setBasicBlock or setMI must have been called.
-  /// \pre Indices must be in ascending order of bit position.
-  /// \pre Each member of \p Results and \p Src must be a generic
-  /// virtual register.
+  /// \pre \p Res and \p Src must be generic virtual registers.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildExtract(ArrayRef<unsigned> Results,
-                                   ArrayRef<uint64_t> Indices, unsigned Src);
+  MachineInstrBuilder buildExtract(unsigned Res, unsigned Src, uint64_t Index);
 
   /// Build and insert \p Res = IMPLICIT_DEF.
   MachineInstrBuilder buildUndef(unsigned Dst);

View File

@@ -429,8 +429,8 @@ def G_STORE : Instruction {
 // indexes. This will almost certainly be mapped to sub-register COPYs after
 // register banks have been selected.
 def G_EXTRACT : Instruction {
-  let OutOperandList = (outs);
-  let InOperandList = (ins variable_ops);
+  let OutOperandList = (outs type0:$res);
+  let InOperandList = (ins type1:$src, unknown:$offset);
   let hasSideEffects = 0;
 }

View File

@@ -341,7 +341,7 @@ bool IRTranslator::translateExtractValue(const User &U,
   uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);
 
   unsigned Res = getOrCreateVReg(U);
-  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));
+  MIRBuilder.buildExtract(Res, getOrCreateVReg(*Src), Offset);
 
   return true;
 }

View File

@@ -202,8 +202,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
       if (OpSegSize != OpSize) {
         // A genuine extract is needed.
         OpSegReg = MRI.createGenericVirtualRegister(LLT::scalar(OpSegSize));
-        MIRBuilder.buildExtract(OpSegReg, std::max(OpSegStart, (int64_t)0),
-                                OpReg);
+        MIRBuilder.buildExtract(OpSegReg, OpReg,
+                                std::max(OpSegStart, (int64_t)0));
       }
 
       unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
View File

@@ -382,34 +382,25 @@ MachineInstrBuilder MachineIRBuilder::buildCast(unsigned Dst, unsigned Src) {
   return buildInstr(Opcode).addDef(Dst).addUse(Src);
 }
 
-MachineInstrBuilder MachineIRBuilder::buildExtract(ArrayRef<unsigned> Results,
-                                                   ArrayRef<uint64_t> Indices,
-                                                   unsigned Src) {
+MachineInstrBuilder MachineIRBuilder::buildExtract(unsigned Res, unsigned Src,
+                                                   uint64_t Index) {
 #ifndef NDEBUG
-  assert(Results.size() == Indices.size() && "inconsistent number of regs");
-  assert(!Results.empty() && "invalid trivial extract");
-  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
-         "extract offsets must be in ascending order");
-
   assert(MRI->getType(Src).isValid() && "invalid operand type");
-  for (auto Res : Results)
-    assert(MRI->getType(Res).isValid() && "invalid operand type");
+  assert(MRI->getType(Res).isValid() && "invalid operand type");
+  assert(Index + MRI->getType(Res).getSizeInBits() <=
+             MRI->getType(Src).getSizeInBits() &&
+         "extracting off end of register");
 #endif
 
-  auto MIB = BuildMI(getMF(), DL, getTII().get(TargetOpcode::G_EXTRACT));
-  for (auto Res : Results)
-    MIB.addDef(Res);
+  if (MRI->getType(Res).getSizeInBits() == MRI->getType(Src).getSizeInBits()) {
+    assert(Index == 0 && "insertion past the end of a register");
+    return buildCast(Res, Src);
+  }
 
-  MIB.addUse(Src);
-
-  for (auto Idx : Indices)
-    MIB.addImm(Idx);
-
-  getMBB().insert(getInsertPt(), MIB);
-  if (InsertedInstr)
-    InsertedInstr(MIB);
-
-  return MIB;
+  return buildInstr(TargetOpcode::G_EXTRACT)
+      .addDef(Res)
+      .addUse(Src)
+      .addImm(Index);
 }
 
 MachineInstrBuilder

View File

@@ -200,15 +200,8 @@ void AArch64CallLowering::splitToValueTypes(
                            OrigArg.Flags, OrigArg.IsFixed});
   }
 
-  SmallVector<uint64_t, 4> BitOffsets;
-  for (auto Offset : Offsets)
-    BitOffsets.push_back(Offset * 8);
-
-  SmallVector<unsigned, 8> SplitRegs;
-  for (auto I = &SplitArgs[FirstRegIdx]; I != SplitArgs.end(); ++I)
-    SplitRegs.push_back(I->Reg);
-
-  PerformArgSplit(SplitRegs, BitOffsets);
+  for (unsigned i = 0; i < Offsets.size(); ++i)
+    PerformArgSplit(SplitArgs[FirstRegIdx + i].Reg, Offsets[i] * 8);
 }
 
 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
@@ -230,8 +223,8 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
     SmallVector<ArgInfo, 8> SplitArgs;
     splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
-                      [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
-                        MIRBuilder.buildExtract(Regs, Offsets, VReg);
+                      [&](unsigned Reg, uint64_t Offset) {
+                        MIRBuilder.buildExtract(Reg, VReg, Offset);
                       });
 
     OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
@@ -256,10 +249,24 @@ bool AArch64CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   for (auto &Arg : Args) {
     ArgInfo OrigArg{VRegs[i], Arg.getType()};
     setArgFlags(OrigArg, i + 1, DL, F);
+    bool Split = false;
+    LLT Ty = MRI.getType(VRegs[i]);
+    unsigned Dst = VRegs[i];
+
     splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
-                      [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
-                        MIRBuilder.buildSequence(VRegs[i], Regs, Offsets);
+                      [&](unsigned Reg, uint64_t Offset) {
+                        if (!Split) {
+                          Split = true;
+                          Dst = MRI.createGenericVirtualRegister(Ty);
+                          MIRBuilder.buildUndef(Dst);
+                        }
+                        unsigned Tmp = MRI.createGenericVirtualRegister(Ty);
+                        MIRBuilder.buildInsert(Tmp, Dst, Reg, Offset);
+                        Dst = Tmp;
                       });
+
+    if (Dst != VRegs[i])
+      MIRBuilder.buildCopy(VRegs[i], Dst);
+
     ++i;
   }
@@ -307,8 +314,8 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   SmallVector<ArgInfo, 8> SplitArgs;
   for (auto &OrigArg : OrigArgs) {
     splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
-                      [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
-                        MIRBuilder.buildExtract(Regs, Offsets, OrigArg.Reg);
+                      [&](unsigned Reg, uint64_t Offset) {
+                        MIRBuilder.buildExtract(Reg, OrigArg.Reg, Offset);
                       });
   }
@@ -360,11 +367,9 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
     SmallVector<uint64_t, 8> RegOffsets;
     SmallVector<unsigned, 8> SplitRegs;
     splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
-                      [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
-                        std::copy(Offsets.begin(), Offsets.end(),
-                                  std::back_inserter(RegOffsets));
-                        std::copy(Regs.begin(), Regs.end(),
-                                  std::back_inserter(SplitRegs));
+                      [&](unsigned Reg, uint64_t Offset) {
+                        RegOffsets.push_back(Offset);
+                        SplitRegs.push_back(Reg);
                       });
 
     CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);

View File

@@ -46,8 +46,7 @@ private:
   typedef std::function<void(MachineIRBuilder &, int, CCValAssign &)>
       MemHandler;
 
-  typedef std::function<void(ArrayRef<unsigned>, ArrayRef<uint64_t>)>
-      SplitArgTy;
+  typedef std::function<void(unsigned, uint64_t)> SplitArgTy;
 
   void splitToValueTypes(const ArgInfo &OrigArgInfo,
                          SmallVectorImpl<ArgInfo> &SplitArgs,

View File

@@ -118,8 +118,8 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
       unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                             MRI.createGenericVirtualRegister(LLT::scalar(32))};
 
-      MIRBuilder.buildExtract(NewRegs, {0, 32}, Arg.Reg);
+      MIRBuilder.buildExtract(NewRegs[0], Arg.Reg, 0);
+      MIRBuilder.buildExtract(NewRegs[1], Arg.Reg, 32);
 
       bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
       if (!IsLittle)

View File

@@ -148,23 +148,19 @@ static bool selectExtract(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII,
   (void)VReg0;
   assert(MRI.getType(VReg0).getSizeInBits() == 32 &&
          RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID &&
-         "Unsupported operand for G_SEQUENCE");
+         "Unsupported operand for G_EXTRACT");
   unsigned VReg1 = MIB->getOperand(1).getReg();
   (void)VReg1;
-  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
-         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
-         "Unsupported operand for G_SEQUENCE");
-  unsigned VReg2 = MIB->getOperand(2).getReg();
-  (void)VReg2;
-  assert(MRI.getType(VReg2).getSizeInBits() == 64 &&
-         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID &&
-         "Unsupported operand for G_SEQUENCE");
+  assert(MRI.getType(VReg1).getSizeInBits() == 64 &&
+         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::FPRRegBankID &&
+         "Unsupported operand for G_EXTRACT");
+  assert(MIB->getOperand(2).getImm() % 32 == 0 &&
+         "Unsupported operand for G_EXTRACT");
 
-  // Remove the operands corresponding to the offsets.
-  MIB->RemoveOperand(4);
-  MIB->RemoveOperand(3);
+  MIB->getOperand(2).setImm(MIB->getOperand(2).getImm() / 32);
 
-  MIB->setDesc(TII.get(ARM::VMOVRRD));
+  MIB->setDesc(TII.get(ARM::VGETLNi32));
   MIB.add(predOps(ARMCC::AL));
 
   return true;

View File

@@ -263,12 +263,10 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     // We only support G_EXTRACT for splitting a double precision floating point
     // value into two GPRs.
     LLT Ty1 = MRI.getType(MI.getOperand(1).getReg());
-    LLT Ty2 = MRI.getType(MI.getOperand(2).getReg());
-    if (Ty.getSizeInBits() != 32 || Ty1.getSizeInBits() != 32 ||
-        Ty2.getSizeInBits() != 64)
+    if (Ty.getSizeInBits() != 32 || Ty1.getSizeInBits() != 64 ||
+        MI.getOperand(2).getImm() % 32 != 0)
       return InstructionMapping{};
     OperandsMapping = getOperandsMapping({&ARM::ValueMappings[ARM::GPR3OpsIdx],
-                                          &ARM::ValueMappings[ARM::GPR3OpsIdx],
                                           &ARM::ValueMappings[ARM::DPR3OpsIdx],
                                           nullptr, nullptr});
     break;

View File

@@ -61,11 +61,8 @@ void X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
     ArgInfo Info = ArgInfo{MRI.createGenericVirtualRegister(LLT{*PartTy, DL}),
                            PartTy, OrigArg.Flags};
     SplitArgs.push_back(Info);
-    BitOffsets.push_back(PartVT.getSizeInBits() * i);
-    SplitRegs.push_back(Info.Reg);
+    PerformArgSplit(Info.Reg, PartVT.getSizeInBits() * i);
   }
-
-  PerformArgSplit(SplitRegs, BitOffsets);
 }
 
 namespace {
@@ -113,8 +110,8 @@ bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
     SmallVector<ArgInfo, 8> SplitArgs;
     splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
-                      [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
-                        MIRBuilder.buildExtract(Regs, Offsets, VReg);
+                      [&](unsigned Reg, uint64_t Offset) {
+                        MIRBuilder.buildExtract(Reg, VReg, Offset);
                       });
 
     FuncReturnHandler Handler(MIRBuilder, MRI, MIB, RetCC_X86);
@@ -184,10 +181,22 @@ bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   for (auto &Arg : F.getArgumentList()) {
     ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    setArgFlags(OrigArg, Idx + 1, DL, F);
+    LLT Ty = MRI.getType(VRegs[Idx]);
+    unsigned Dst = VRegs[Idx];
+    bool Split = false;
     splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
-                      [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
-                        MIRBuilder.buildSequence(VRegs[Idx], Regs, Offsets);
+                      [&](unsigned Reg, uint64_t Offset) {
+                        if (!Split) {
+                          Split = true;
+                          Dst = MRI.createGenericVirtualRegister(Ty);
+                          MIRBuilder.buildUndef(Dst);
+                        }
+                        unsigned Tmp = MRI.createGenericVirtualRegister(Ty);
+                        MIRBuilder.buildInsert(Tmp, Dst, Reg, Offset);
+                        Dst = Tmp;
                       });
+    if (Dst != VRegs[Idx])
+      MIRBuilder.buildCopy(VRegs[Idx], Dst);
     Idx++;
   }

View File

@@ -36,8 +36,7 @@ public:
                             ArrayRef<unsigned> VRegs) const override;
 
 private:
   /// A function of this type is used to perform value split action.
-  typedef std::function<void(ArrayRef<unsigned>, ArrayRef<uint64_t>)>
-      SplitArgTy;
+  typedef std::function<void(unsigned, uint64_t)> SplitArgTy;
 
   void splitToValueTypes(const ArgInfo &OrigArgInfo,
                          SmallVectorImpl<ArgInfo> &SplitArgs,

View File

@@ -63,7 +63,13 @@ define void @test_multiple_args(i64 %in) {
 ; CHECK: [[I64:%[0-9]+]](s64) = COPY %x0
 ; CHECK: [[I8:%[0-9]+]](s8) = COPY %w1
 ; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
-; CHECK: [[ARG:%[0-9]+]](s192) = G_SEQUENCE [[DBL]](s64), 0, [[I64]](s64), 64, [[I8]](s8), 128
+; CHECK: [[UNDEF:%[0-9]+]](s192) = IMPLICIT_DEF
+; CHECK: [[ARG0:%[0-9]+]](s192) = G_INSERT [[UNDEF]], [[DBL]](s64), 0
+; CHECK: [[ARG1:%[0-9]+]](s192) = G_INSERT [[ARG0]], [[I64]](s64), 64
+; CHECK: [[ARG2:%[0-9]+]](s192) = G_INSERT [[ARG1]], [[I8]](s8), 128
+; CHECK: [[ARG:%[0-9]+]](s192) = COPY [[ARG2]]
 ; CHECK: G_STORE [[ARG]](s192), [[ADDR]](p0)
 ; CHECK: RET_ReallyLR
 define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr) {
@@ -75,7 +81,11 @@ define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr)
 ; CHECK-LABEL: name: test_struct_return
 ; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
 ; CHECK: [[VAL:%[0-9]+]](s192) = G_LOAD [[ADDR]](p0)
-; CHECK: [[DBL:%[0-9]+]](s64), [[I64:%[0-9]+]](s64), [[I32:%[0-9]+]](s32) = G_EXTRACT [[VAL]](s192), 0, 64, 128
+; CHECK: [[DBL:%[0-9]+]](s64) = G_EXTRACT [[VAL]](s192), 0
+; CHECK: [[I64:%[0-9]+]](s64) = G_EXTRACT [[VAL]](s192), 64
+; CHECK: [[I32:%[0-9]+]](s32) = G_EXTRACT [[VAL]](s192), 128
 ; CHECK: %d0 = COPY [[DBL]](s64)
 ; CHECK: %x0 = COPY [[I64]](s64)
 ; CHECK: %w1 = COPY [[I32]](s32)
@@ -87,7 +97,12 @@ define {double, i64, i32} @test_struct_return({double, i64, i32}* %addr) {
 ; CHECK-LABEL: name: test_arr_call
 ; CHECK: [[ARG:%[0-9]+]](s256) = G_LOAD
-; CHECK: [[E0:%[0-9]+]](s64), [[E1:%[0-9]+]](s64), [[E2:%[0-9]+]](s64), [[E3:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 0, 64, 128, 192
+; CHECK: [[E0:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 0
+; CHECK: [[E1:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 64
+; CHECK: [[E2:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 128
+; CHECK: [[E3:%[0-9]+]](s64) = G_EXTRACT [[ARG]](s256), 192
 ; CHECK: %x0 = COPY [[E0]](s64)
 ; CHECK: %x1 = COPY [[E1]](s64)
 ; CHECK: %x2 = COPY [[E2]](s64)

View File

@@ -23,7 +23,8 @@ declare i32 @llvm.eh.typeid.for(i8*)
 ; CHECK: [[SEL_PTR:%[0-9]+]](p0) = COPY %x1
 ; CHECK: [[SEL:%[0-9]+]](s32) = G_PTRTOINT [[SEL_PTR]]
 ; CHECK: [[PTR_SEL:%[0-9]+]](s128) = G_SEQUENCE [[PTR]](p0), 0, [[SEL]](s32), 64
-; CHECK: [[PTR_RET:%[0-9]+]](s64), [[SEL_RET:%[0-9]+]](s32) = G_EXTRACT [[PTR_SEL]](s128), 0, 64
+; CHECK: [[PTR_RET:%[0-9]+]](s64) = G_EXTRACT [[PTR_SEL]](s128), 0
+; CHECK: [[SEL_RET:%[0-9]+]](s32) = G_EXTRACT [[PTR_SEL]](s128), 64
 ; CHECK: %x0 = COPY [[PTR_RET]]
 ; CHECK: %w1 = COPY [[SEL_RET]]

View File

@@ -70,7 +70,8 @@ body: |
     ; CHECK: %5(s32) = G_ADD %0, %1
     %1:_(s32) = G_ADD %0, %0
     %2:_(s64) = G_SEQUENCE %0, 0, %1, 32
-    %3:_(s32), %4:_(s32) = G_EXTRACT %2, 0, 32
+    %3:_(s32) = G_EXTRACT %2, 0
+    %4:_(s32) = G_EXTRACT %2, 32
     %5:_(s32) = G_ADD %3, %4
 ...

View File

@@ -515,8 +515,10 @@ body: |
     %2(s64) = G_SEQUENCE %0(s32), 0, %1(s32), 32
     ; CHECK: %[[DREG]] = VMOVDRR [[IN1]], [[IN2]]
 
-    %3(s32), %4(s32) = G_EXTRACT %2(s64), 0, 32
-    ; CHECK: [[OUT1:%[0-9]+]], [[OUT2:%[0-9]+]] = VMOVRRD %[[DREG]]
+    %3(s32) = G_EXTRACT %2(s64), 0
+    %4(s32) = G_EXTRACT %2(s64), 32
+    ; CHECK: [[OUT1:%[0-9]+]] = VGETLNi32 %[[DREG]], 0
+    ; CHECK: [[OUT2:%[0-9]+]] = VGETLNi32 %[[DREG]], 1
 
     %r0 = COPY %3
     ; CHECK: %r0 = COPY [[OUT1]]
View File

@@ -258,8 +258,10 @@ define arm_aapcscc double @test_double_aapcscc(double %p0, double %p1, double %p
 ; CHECK: [[FIP5:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[P5]]
 ; CHECK: [[VREGP5:%[0-9]+]](s64) = G_LOAD [[FIP5]](p0)
 ; CHECK: [[VREGV:%[0-9]+]](s64) = G_FADD [[VREGP1]], [[VREGP5]]
-; LITTLE: [[VREGVLO:%[0-9]+]](s32), [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0, 32
-; BIG: [[VREGVHI:%[0-9]+]](s32), [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0, 32
+; LITTLE: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; LITTLE: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
+; BIG: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; BIG: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
 ; CHECK-DAG: %r0 = COPY [[VREGVLO]]
 ; CHECK-DAG: %r1 = COPY [[VREGVHI]]
 ; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
@@ -303,8 +305,10 @@ define arm_aapcscc double @test_double_gap_aapcscc(float %filler, double %p0,
 ; CHECK: [[FIP1:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[P1]]
 ; CHECK: [[VREGP1:%[0-9]+]](s64) = G_LOAD [[FIP1]](p0)
 ; CHECK: [[VREGV:%[0-9]+]](s64) = G_FADD [[VREGP0]], [[VREGP1]]
-; LITTLE: [[VREGVLO:%[0-9]+]](s32), [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0, 32
-; BIG: [[VREGVHI:%[0-9]+]](s32), [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0, 32
+; LITTLE: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; LITTLE: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
+; BIG: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; BIG: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
 ; CHECK-DAG: %r0 = COPY [[VREGVLO]]
 ; CHECK-DAG: %r1 = COPY [[VREGVHI]]
 ; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
@@ -326,8 +330,10 @@ define arm_aapcscc double @test_double_gap2_aapcscc(double %p0, float %filler,
 ; CHECK: [[FIP1:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[P1]]
 ; CHECK: [[VREGP1:%[0-9]+]](s64) = G_LOAD [[FIP1]](p0)
 ; CHECK: [[VREGV:%[0-9]+]](s64) = G_FADD [[VREGP0]], [[VREGP1]]
-; LITTLE: [[VREGVLO:%[0-9]+]](s32), [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0, 32
-; BIG: [[VREGVHI:%[0-9]+]](s32), [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0, 32
+; LITTLE: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; LITTLE: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
+; BIG: [[VREGVHI:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 0
+; BIG: [[VREGVLO:%[0-9]+]](s32) = G_EXTRACT [[VREGV]](s64), 32
 ; CHECK-DAG: %r0 = COPY [[VREGVLO]]
 ; CHECK-DAG: %r1 = COPY [[VREGVHI]]
 ; CHECK: BX_RET 14, _, implicit %r0, implicit %r1
@@ -489,7 +495,8 @@ define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
 ; CHECK-DAG: [[BVREG:%[0-9]+]](s32) = COPY %r2
 ; CHECK: ADJCALLSTACKDOWN 16, 14, _, implicit-def %sp, implicit %sp
 ; CHECK-DAG: %r0 = COPY [[BVREG]]
-; CHECK-DAG: [[A1:%[0-9]+]](s32), [[A2:%[0-9]+]](s32) = G_EXTRACT [[AVREG]](s64), 0, 32
+; CHECK-DAG: [[A1:%[0-9]+]](s32) = G_EXTRACT [[AVREG]](s64), 0
+; CHECK-DAG: [[A2:%[0-9]+]](s32) = G_EXTRACT [[AVREG]](s64), 32
 ; LITTLE-DAG: %r2 = COPY [[A1]]
 ; LITTLE-DAG: %r3 = COPY [[A2]]
 ; BIG-DAG: %r2 = COPY [[A2]]
@@ -508,7 +515,8 @@ define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
 ; LITTLE: [[RVREG:%[0-9]+]](s64) = G_SEQUENCE [[R1]](s32), 0, [[R2]](s32), 32
 ; BIG: [[RVREG:%[0-9]+]](s64) = G_SEQUENCE [[R2]](s32), 0, [[R1]](s32), 32
 ; CHECK: ADJCALLSTACKUP 16, 0, 14, _, implicit-def %sp, implicit %sp
-; CHECK: [[R1:%[0-9]+]](s32), [[R2:%[0-9]+]](s32) = G_EXTRACT [[RVREG]](s64), 0, 32
+; CHECK: [[R1:%[0-9]+]](s32) = G_EXTRACT [[RVREG]](s64), 0
+; CHECK: [[R2:%[0-9]+]](s32) = G_EXTRACT [[RVREG]](s64), 32
 ; LITTLE-DAG: %r0 = COPY [[R1]]
 ; LITTLE-DAG: %r1 = COPY [[R2]]
 ; BIG-DAG: %r0 = COPY [[R2]]

View File

@@ -175,7 +175,8 @@ define arm_aapcscc double @test_double_softfp(double %f0, double %f1) {
 ; CHECK-DAG: vmov [[F0:d[0-9]+]], r0, r1
 ; CHECK-DAG: vmov [[F1:d[0-9]+]], r2, r3
 ; CHECK: vadd.f64 [[FV:d[0-9]+]], [[F0]], [[F1]]
-; CHECK: vmov r0, r1, [[FV]]
+; CHECK: vmov.32 r0, [[FV]][0]
+; CHECK: vmov.32 r1, [[FV]][1]
 ; CHECK: bx lr
 entry:
   %v = fadd double %f0, %f1

View File

@@ -320,7 +320,8 @@ body: |
     %0(s32) = COPY %r0
     %1(s32) = COPY %r1
     %2(s64) = G_SEQUENCE %0(s32), 0, %1(s32), 32
-    %3(s32), %4(s32) = G_EXTRACT %2(s64), 0, 32
+    %3(s32) = G_EXTRACT %2(s64), 0
+    %4(s32) = G_EXTRACT %2(s64), 32
     %r0 = COPY %3(s32)
     %r1 = COPY %4(s32)
     BX_RET 14, _, implicit %r0, implicit %r1

View File

@@ -206,14 +206,25 @@ define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4,
 ; X32-NEXT: [[ARG8L:%[0-9]+]](s32) = G_LOAD [[ARG8L_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK56]], align 0)
 ; X32-NEXT: [[ARG8H_ADDR:%[0-9]+]](p0) = G_FRAME_INDEX %fixed-stack.[[STACK60]]
 ; X32-NEXT: [[ARG8H:%[0-9]+]](s32) = G_LOAD [[ARG8H_ADDR]](p0) :: (invariant load 4 from %fixed-stack.[[STACK60]], align 0)
-; X32-NEXT: [[ARG1:%[0-9]+]](s64) = G_SEQUENCE [[ARG1L:%[0-9]+]](s32), 0, [[ARG1H:%[0-9]+]](s32), 32
-; X32-NEXT: %{{[0-9]+}}(s64) = G_SEQUENCE %{{[0-9]+}}(s32), 0, %{{[0-9]+}}(s32), 32
-; X32-NEXT: %{{[0-9]+}}(s64) = G_SEQUENCE %{{[0-9]+}}(s32), 0, %{{[0-9]+}}(s32), 32
-; X32-NEXT: %{{[0-9]+}}(s64) = G_SEQUENCE %{{[0-9]+}}(s32), 0, %{{[0-9]+}}(s32), 32
-; X32-NEXT: %{{[0-9]+}}(s64) = G_SEQUENCE %{{[0-9]+}}(s32), 0, %{{[0-9]+}}(s32), 32
-; X32-NEXT: %{{[0-9]+}}(s64) = G_SEQUENCE %{{[0-9]+}}(s32), 0, %{{[0-9]+}}(s32), 32
-; X32-NEXT: [[ARG7:%[0-9]+]](s64) = G_SEQUENCE [[ARG7L:%[0-9]+]](s32), 0, [[ARG7H:%[0-9]+]](s32), 32
-; X32-NEXT: [[ARG8:%[0-9]+]](s64) = G_SEQUENCE [[ARG8L:%[0-9]+]](s32), 0, [[ARG8H:%[0-9]+]](s32), 32
+; X32-NEXT: [[UNDEF:%[0-9]+]](s64) = IMPLICIT_DEF
+; X32-NEXT: [[ARG1_TMP0:%[0-9]+]](s64) = G_INSERT [[UNDEF]], [[ARG1L]](s32), 0
+; X32-NEXT: [[ARG1_TMP1:%[0-9]+]](s64) = G_INSERT [[ARG1_TMP0]], [[ARG1H]](s32), 32
+; X32-NEXT: [[ARG1:%[0-9]+]](s64) = COPY [[ARG1_TMP1]]
+; ... a bunch more that we don't track ...
+; X32: IMPLICIT_DEF
+; X32: IMPLICIT_DEF
+; X32: IMPLICIT_DEF
+; X32: IMPLICIT_DEF
+; X32: IMPLICIT_DEF
+; X32: [[UNDEF:%[0-9]+]](s64) = IMPLICIT_DEF
+; X32-NEXT: [[ARG7_TMP0:%[0-9]+]](s64) = G_INSERT [[UNDEF]], [[ARG7L]](s32), 0
+; X32-NEXT: [[ARG7_TMP1:%[0-9]+]](s64) = G_INSERT [[ARG7_TMP0]], [[ARG7H]](s32), 32
+; X32-NEXT: [[ARG7:%[0-9]+]](s64) = COPY [[ARG7_TMP1]]
+; X32-NEXT: [[UNDEF:%[0-9]+]](s64) = IMPLICIT_DEF
+; X32-NEXT: [[ARG8_TMP0:%[0-9]+]](s64) = G_INSERT [[UNDEF]], [[ARG8L]](s32), 0
+; X32-NEXT: [[ARG8_TMP1:%[0-9]+]](s64) = G_INSERT [[ARG8_TMP0]], [[ARG8H]](s32), 32
+; X32-NEXT: [[ARG8:%[0-9]+]](s64) = COPY [[ARG8_TMP1]]
 ; ALL-NEXT: [[GADDR_A1:%[0-9]+]](p0) = G_GLOBAL_VALUE @a1_64bit
 ; ALL-NEXT: [[GADDR_A7:%[0-9]+]](p0) = G_GLOBAL_VALUE @a7_64bit
@@ -225,7 +236,8 @@ define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4,
 ; X64-NEXT: %rax = COPY [[ARG1]](s64)
 ; X64-NEXT: RET 0, implicit %rax
-; X32-NEXT: [[RETL:%[0-9]+]](s32), [[RETH:%[0-9]+]](s32) = G_EXTRACT [[ARG1:%[0-9]+]](s64), 0, 32
+; X32-NEXT: [[RETL:%[0-9]+]](s32) = G_EXTRACT [[ARG1:%[0-9]+]](s64), 0
+; X32-NEXT: [[RETH:%[0-9]+]](s32) = G_EXTRACT [[ARG1:%[0-9]+]](s64), 32
 ; X32-NEXT: %eax = COPY [[RETL:%[0-9]+]](s32)
 ; X32-NEXT: %edx = COPY [[RETH:%[0-9]+]](s32)
 ; X32-NEXT: RET 0, implicit %eax, implicit %edx

View File

@@ -15,8 +15,12 @@ define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
 ; X64: liveins: %xmm0, %xmm1
 ; X64: [[ARG1L:%[0-9]+]](<4 x s32>) = COPY %xmm0
 ; X64-NEXT: [[ARG1H:%[0-9]+]](<4 x s32>) = COPY %xmm1
-; X64-NEXT: [[ARG1:%[0-9]+]](<8 x s32>) = G_SEQUENCE [[ARG1L:%[0-9]+]](<4 x s32>), 0, [[ARG1H:%[0-9]+]](<4 x s32>), 128
-; X64-NEXT: [[RETL:%[0-9]+]](<4 x s32>), [[RETH:%[0-9]+]](<4 x s32>) = G_EXTRACT [[ARG1:%[0-9]+]](<8 x s32>), 0, 128
+; X64-NEXT: [[UNDEF:%[0-9]+]](<8 x s32>) = IMPLICIT_DEF
+; X64-NEXT: [[ARG1_TMP0:%[0-9]+]](<8 x s32>) = G_INSERT [[UNDEF]], [[ARG1L]](<4 x s32>), 0
+; X64-NEXT: [[ARG1_TMP1:%[0-9]+]](<8 x s32>) = G_INSERT [[ARG1_TMP0]], [[ARG1H]](<4 x s32>), 128
+; X64-NEXT: [[ARG1:%[0-9]+]](<8 x s32>) = COPY [[ARG1_TMP1]]
+; X64-NEXT: [[RETL:%[0-9]+]](<4 x s32>) = G_EXTRACT [[ARG1:%[0-9]+]](<8 x s32>), 0
+; X64-NEXT: [[RETH:%[0-9]+]](<4 x s32>) = G_EXTRACT [[ARG1:%[0-9]+]](<8 x s32>), 128
 ; X64-NEXT: %xmm0 = COPY [[RETL:%[0-9]+]](<4 x s32>)
 ; X64-NEXT: %xmm1 = COPY [[RETH:%[0-9]+]](<4 x s32>)
 ; X64-NEXT: RET 0, implicit %xmm0, implicit %xmm1