Convert push_back loops into append calls.

No functionality change intended.

llvm-svn: 230849
commit 5fbfe2ffdc
parent 42a7adf171
Author: Benjamin Kramer
Date:   2015-02-28 13:20:15 +00:00
7 changed files with 16 additions and 30 deletions
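The pattern in every hunk below is the same: a loop that pushes elements one at a time becomes a single bulk call, letting the container size its storage once instead of growing it per element. A minimal standalone sketch of the two SmallVector::append overloads the commit leans on (illustrative names; assumes LLVM's ADT headers are available):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

void collect(llvm::ArrayRef<int> Src, unsigned NumElts) {
  llvm::SmallVector<int, 8> Opnds;

  // Before: one growth check per element.
  for (unsigned i = 0; i != NumElts; ++i)
    Opnds.push_back(Src[i]);
  Opnds.clear();

  // After: copy an iterator range in one call
  // (the DAGCombiner, NVPTX, and X86InstrInfo hunks) ...
  Opnds.append(Src.begin(), Src.begin() + NumElts);

  // ... or append NumElts copies of one value
  // (the getUNDEF splat in the DAGCombiner hunk).
  Opnds.append(NumElts, 0);
}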


@@ -11465,14 +11465,12 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
     unsigned NumElts = OpVT.getVectorNumElements();
 
     if (ISD::UNDEF == Op.getOpcode())
-      for (unsigned i = 0; i != NumElts; ++i)
-        Opnds.push_back(DAG.getUNDEF(MinVT));
+      Opnds.append(NumElts, DAG.getUNDEF(MinVT));
 
     if (ISD::BUILD_VECTOR == Op.getOpcode()) {
       if (SVT.isFloatingPoint()) {
         assert(SVT == OpVT.getScalarType() && "Concat vector type mismatch");
-        for (unsigned i = 0; i != NumElts; ++i)
-          Opnds.push_back(Op.getOperand(i));
+        Opnds.append(Op->op_begin(), Op->op_begin() + NumElts);
       } else {
         for (unsigned i = 0; i != NumElts; ++i)
           Opnds.push_back(


@@ -2971,10 +2971,7 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
 }
 
 Instruction *ConstantExpr::getAsInstruction() {
-  SmallVector<Value*,4> ValueOperands;
-  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
-    ValueOperands.push_back(cast<Value>(I));
-
+  SmallVector<Value *, 4> ValueOperands(op_begin(), op_end());
   ArrayRef<Value*> Ops(ValueOperands);
 
   switch (getOpcode()) {
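The Constants.cpp change goes one step further and uses SmallVector's iterator-range constructor, since the vector is built from scratch. A rough standalone sketch of that idiom together with the ArrayRef view the surrounding code takes (illustrative names; assumes LLVM's ADT headers):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>

int sumOperands(const std::vector<int> &Operands) {
  // The range constructor fills the vector in one shot; no loop needed.
  llvm::SmallVector<int, 4> Vals(Operands.begin(), Operands.end());
  // ArrayRef is a cheap, non-owning view over the collected elements.
  llvm::ArrayRef<int> Ops(Vals);
  int Sum = 0;
  for (int V : Ops)
    Sum += V;
  return Sum;
}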


@@ -3356,11 +3356,12 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
   EVT VecVT;
   EVT EltVT;
-  SDValue EltMask, VecVal1, VecVal2;
+  uint64_t EltMask;
+  SDValue VecVal1, VecVal2;
 
   if (VT == MVT::f32 || VT == MVT::v2f32 || VT == MVT::v4f32) {
     EltVT = MVT::i32;
     VecVT = MVT::v4i32;
-    EltMask = DAG.getConstant(0x80000000ULL, EltVT);
+    EltMask = 0x80000000ULL;
 
     if (!VT.isVector()) {
       VecVal1 = DAG.getTargetInsertSubreg(AArch64::ssub, DL, VecVT,
@@ -3378,7 +3379,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
     // We want to materialize a mask with the high bit set, but the AdvSIMD
     // immediate moves cannot materialize that in a single instruction for
     // 64-bit elements. Instead, materialize zero and then negate it.
-    EltMask = DAG.getConstant(0, EltVT);
+    EltMask = 0;
 
     if (!VT.isVector()) {
       VecVal1 = DAG.getTargetInsertSubreg(AArch64::dsub, DL, VecVT,
@@ -3393,11 +3394,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
     llvm_unreachable("Invalid type for copysign!");
  }
 
-  std::vector<SDValue> BuildVectorOps;
-  for (unsigned i = 0; i < VecVT.getVectorNumElements(); ++i)
-    BuildVectorOps.push_back(EltMask);
-
-  SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, VecVT, BuildVectorOps);
-
+  SDValue BuildVec = DAG.getConstant(EltMask, VecVT);
+
   // If we couldn't materialize the mask above, then the mask vector will be
   // the zero vector, and we need to negate it here.
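In this hunk the loop disappears entirely: DAG.getConstant, handed a vector type, already builds the splatted BUILD_VECTOR, so EltMask can stay a plain integer until the one call that needs it. A plain-C++ analogue of the splat simplification (a standalone sketch, not the SelectionDAG API):

#include <cstdint>
#include <vector>

std::vector<uint64_t> makeSplatMask(unsigned NumElts, uint64_t EltMask) {
  // Before: a loop pushing EltMask once per lane.
  // After: the fill constructor expresses the splat directly.
  return std::vector<uint64_t>(NumElts, EltMask);
}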


@@ -1940,9 +1940,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
     }
 
     // Then any remaining arguments
-    for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
-      Ops.push_back(N->getOperand(i));
-    }
+    Ops.append(N->op_begin() + 2, N->op_end());
 
     SDValue NewSt = DAG.getMemIntrinsicNode(
         Opcode, DL, DAG.getVTList(MVT::Other), Ops,


@@ -16255,7 +16255,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
     Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
     return DAG.getNode(ISD::TRUNCATE, dl, VT,
                        DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
-    }
+  }
 
   // Decompose 256-bit shifts into smaller 128-bit shifts.
   if (VT.is256BitVector()) {
@@ -16271,12 +16271,9 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
     SDValue Amt1, Amt2;
     if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
       // Constant shift amount
-      SmallVector<SDValue, 4> Amt1Csts;
-      SmallVector<SDValue, 4> Amt2Csts;
-      for (unsigned i = 0; i != NumElems/2; ++i)
-        Amt1Csts.push_back(Amt->getOperand(i));
-      for (unsigned i = NumElems/2; i != NumElems; ++i)
-        Amt2Csts.push_back(Amt->getOperand(i));
+      SmallVector<SDValue, 8> Ops(Amt->op_begin(), Amt->op_begin() + NumElems);
+      ArrayRef<SDValue> Amt1Csts = makeArrayRef(Ops).slice(0, NumElems / 2);
+      ArrayRef<SDValue> Amt2Csts = makeArrayRef(Ops).slice(NumElems / 2);
 
       Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
       Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
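Rather than filling two vectors, the shift hunk gathers all operands once and takes two non-owning slices of the result. A minimal sketch of the makeArrayRef/slice idiom (illustrative names; assumes LLVM's ADT headers):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

// Split one collected operand list into two halves without copying.
void splitHalves(llvm::ArrayRef<int> Src, unsigned NumElems) {
  llvm::SmallVector<int, 8> Ops(Src.begin(), Src.begin() + NumElems);
  // slice(Start, Length) takes Length elements from Start;
  // slice(Start) takes everything from Start to the end.
  llvm::ArrayRef<int> Lo = llvm::makeArrayRef(Ops).slice(0, NumElems / 2);
  llvm::ArrayRef<int> Hi = llvm::makeArrayRef(Ops).slice(NumElems / 2);
  (void)Lo;
  (void)Hi;
}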


@@ -5291,8 +5291,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
         return nullptr;
 
       // Folding a normal load. Just copy the load's address operands.
-      for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
-        MOs.push_back(LoadMI->getOperand(i));
+      MOs.append(LoadMI->operands_begin() + NumOps - X86::AddrNumOperands,
+                 LoadMI->operands_begin() + NumOps);
       break;
     }
   }


@@ -623,8 +623,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
       // Simple byval argument? Just add all the struct element types.
       Type *AgTy = cast<PointerType>(I->getType())->getElementType();
       StructType *STy = cast<StructType>(AgTy);
-      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
-        Params.push_back(STy->getElementType(i));
+      Params.insert(Params.end(), STy->element_begin(), STy->element_end());
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(I)) {
      // Unchanged argument
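The ArgPromotion hunk uses the insert-at-end form, which works for std::vector and SmallVector alike. The same pattern in plain standard C++ (a standalone sketch with illustrative names):

#include <vector>

// Append all of Src to Params in one call; the container can reserve
// the extra capacity once instead of growing on every push_back.
void appendRange(std::vector<int> &Params, const std::vector<int> &Src) {
  Params.insert(Params.end(), Src.begin(), Src.end());
}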