Revert "[SelectionDAG] Enable target specific vector scalarization of calls and returns"

This reverts commit r299766. This change appears to have broken the MIPS
buildbots. Reverting while I investigate.

Revert "[mips] Remove usage of debug only variable (NFC)"

This reverts commit r299769. Follow up commit.

llvm-svn: 299788
Simon Dardis 2017-04-07 17:25:05 +00:00
parent bfad55fbc0
commit f7e4388e3b
15 changed files with 103 additions and 2120 deletions

View File

@@ -662,16 +662,6 @@ public:
                                   unsigned &NumIntermediates,
                                   MVT &RegisterVT) const;
 
-  /// Certain targets such as MIPS require that some types such as vectors are
-  /// always broken down into scalars in some contexts. This occurs even if the
-  /// vector type is legal.
-  virtual unsigned getVectorTypeBreakdownForCallingConv(
-      LLVMContext &Context, EVT VT, EVT &IntermediateVT,
-      unsigned &NumIntermediates, MVT &RegisterVT) const {
-    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
-                                  RegisterVT);
-  }
-
   struct IntrinsicInfo {
     unsigned opc = 0;          // target opcode
     EVT memVT;                 // memory VT
@@ -1012,33 +1002,6 @@ public:
     llvm_unreachable("Unsupported extended type!");
   }
 
-  /// Certain combinations of ABIs, Targets and features require that types
-  /// are legal for some operations and not for other operations.
-  /// For MIPS all vector types must be passed through the integer register set.
-  virtual MVT getRegisterTypeForCallingConv(MVT VT) const {
-    return getRegisterType(VT);
-  }
-
-  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
-                                            EVT VT) const {
-    return getRegisterType(Context, VT);
-  }
-
-  /// Certain targets require unusual breakdowns of certain types. For MIPS,
-  /// this occurs when a vector type is used, as vector are passed through the
-  /// integer register set.
-  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
-                                                 EVT VT) const {
-    return getNumRegisters(Context, VT);
-  }
-
-  /// Certain targets have context senstive alignment requirements, where one
-  /// type has the alignment requirement of another type.
-  virtual unsigned getABIAlignmentForCallingConv(Type *ArgTy,
-                                                 DataLayout DL) const {
-    return DL.getABITypeAlignment(ArgTy);
-  }
-
   /// If true, then instruction selection should seek to shrink the FP constant
   /// of the specified type to a smaller type in order to save space and / or
   /// reduce runtime.
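For context: before this revert, call and return lowering consulted the *ForCallingConv hooks removed above, whose default implementations forwarded to the generic type-legalization queries. A minimal sketch of that relationship, assuming only the removed signatures shown in this hunk (the helper below is illustrative, not code from the tree):

#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Illustrative helper (not from the tree): the two query families that
// r299766 distinguished at ABI boundaries.
static void queryArgumentRegisters(const TargetLowering &TLI,
                                   LLVMContext &Ctx, EVT VT) {
  // Generic type-legalization answer; this is all that remains after the
  // revert.
  MVT RegVT = TLI.getRegisterType(Ctx, VT);
  unsigned NumRegs = TLI.getNumRegisters(Ctx, VT);

  // Under r299766, copies that crossed a call or return instead asked:
  //   MVT ABIRegVT      = TLI.getRegisterTypeForCallingConv(Ctx, VT);
  //   unsigned ABIRegs  = TLI.getNumRegistersForCallingConv(Ctx, VT);
  // whose defaults simply returned RegVT and NumRegs; only targets such
  // as MIPS overrode them to force vectors into GPR-sized integers.
  (void)RegVT;
  (void)NumRegs;
}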

View File

@@ -115,8 +115,7 @@ static const unsigned MaxParallelChains = 64;
 
 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                       const SDValue *Parts, unsigned NumParts,
-                                      MVT PartVT, EVT ValueVT, const Value *V,
-                                      bool IsABIRegCopy);
+                                      MVT PartVT, EVT ValueVT, const Value *V);
 
 /// getCopyFromParts - Create a value that contains the specified legal parts
 /// combined into the value they represent.  If the parts combine to a type
@@ -126,11 +125,10 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
 static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                 const SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, EVT ValueVT, const Value *V,
-                                Optional<ISD::NodeType> AssertOp = None,
-                                bool IsABIRegCopy = false) {
+                                Optional<ISD::NodeType> AssertOp = None) {
   if (ValueVT.isVector())
     return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
-                                  PartVT, ValueVT, V, IsABIRegCopy);
+                                  PartVT, ValueVT, V);
 
   assert(NumParts > 0 && "No parts to assemble!");
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -274,8 +272,7 @@ static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
 /// ValueVT (ISD::AssertSext).
 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                       const SDValue *Parts, unsigned NumParts,
-                                      MVT PartVT, EVT ValueVT, const Value *V,
-                                      bool IsABIRegCopy) {
+                                      MVT PartVT, EVT ValueVT, const Value *V) {
   assert(ValueVT.isVector() && "Not a vector value");
   assert(NumParts > 0 && "No parts to assemble!");
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -286,18 +283,9 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
     EVT IntermediateVT;
     MVT RegisterVT;
     unsigned NumIntermediates;
-    unsigned NumRegs;
-
-    if (IsABIRegCopy) {
-      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
-          *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
-          RegisterVT);
-    } else {
-      NumRegs =
-          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
-                                     NumIntermediates, RegisterVT);
-    }
-
+    unsigned NumRegs =
+        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
+                                   NumIntermediates, RegisterVT);
     assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
     NumParts = NumRegs; // Silence a compiler warning.
     assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
@@ -326,14 +314,9 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
 
     // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
     // intermediate operands.
-    EVT BuiltVectorTy =
-        EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
-                         (IntermediateVT.isVector()
-                              ? IntermediateVT.getVectorNumElements() * NumParts
-                              : NumIntermediates));
     Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                 : ISD::BUILD_VECTOR,
-                      DL, BuiltVectorTy, Ops);
+                      DL, ValueVT, Ops);
   }
 
   // There is now one part, held in Val.  Correct it to match ValueVT.
@@ -372,30 +355,13 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
       TLI.isTypeLegal(ValueVT))
     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
 
+  // Handle cases such as i8 -> <1 x i1>
   if (ValueVT.getVectorNumElements() != 1) {
-    // Certain ABIs require that vectors are passed as integers. For vectors
-    // are the same size, this is an obvious bitcast.
-    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
-      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
-    } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
-      // Bitcast Val back the original type and extract the corresponding
-      // vector we want.
-      unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
-      EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
-                                          ValueVT.getVectorElementType(), Elts);
-      Val = DAG.getBitcast(WiderVecType, Val);
-      return DAG.getNode(
-          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
-          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
-    }
-
-    diagnosePossiblyInvalidConstraint(
-        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
+    diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
+                                      "non-trivial scalar-to-vector conversion");
     return DAG.getUNDEF(ValueVT);
   }
 
-  // Handle cases such as i8 -> <1 x i1>
   if (ValueVT.getVectorNumElements() == 1 &&
       ValueVT.getVectorElementType() != PartEVT)
     Val = DAG.getAnyExtOrTrunc(Val, DL, ValueVT.getScalarType());
@@ -405,7 +371,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
 
 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                  SDValue Val, SDValue *Parts, unsigned NumParts,
-                                 MVT PartVT, const Value *V, bool IsABIRegCopy);
+                                 MVT PartVT, const Value *V);
 
 /// getCopyToParts - Create a series of nodes that contain the specified value
 /// split into legal parts.  If the parts contain more bits than Val, then, for
@@ -413,14 +379,12 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
 static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                            SDValue *Parts, unsigned NumParts, MVT PartVT,
                            const Value *V,
-                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND,
-                           bool IsABIRegCopy = false) {
+                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
   EVT ValueVT = Val.getValueType();
 
   // Handle the vector case separately.
   if (ValueVT.isVector())
-    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
-                                IsABIRegCopy);
+    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
 
   unsigned PartBits = PartVT.getSizeInBits();
   unsigned OrigNumParts = NumParts;
@@ -545,9 +509,7 @@ static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
 /// value split into legal parts.
 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                  SDValue Val, SDValue *Parts, unsigned NumParts,
-                                 MVT PartVT, const Value *V,
-                                 bool IsABIRegCopy) {
+                                 MVT PartVT, const Value *V) {
   EVT ValueVT = Val.getValueType();
   assert(ValueVT.isVector() && "Not a vector");
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -588,22 +550,15 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
       // Promoted vector extract
       Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
-    } else {
+    } else{
       // Vector -> scalar conversion.
-      if (ValueVT.getVectorNumElements() == 1) {
-        Val = DAG.getNode(
-            ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
-            DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
+      assert(ValueVT.getVectorNumElements() == 1 &&
+             "Only trivial vector-to-scalar conversions should get here!");
+      Val = DAG.getNode(
+          ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
+          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
 
-        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
-      } else {
-        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
-               "lossy conversion of vector to scalar type");
-        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(),
-                                                 ValueVT.getSizeInBits());
-        Val = DAG.getBitcast(IntermediateType, Val);
-        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
-      }
+      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
     }
 
     Parts[0] = Val;
@@ -614,31 +569,15 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
   EVT IntermediateVT;
   MVT RegisterVT;
   unsigned NumIntermediates;
-  unsigned NumRegs;
-  if (IsABIRegCopy) {
-    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
-        *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
-        RegisterVT);
-  } else {
-    NumRegs =
-        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
-                                   NumIntermediates, RegisterVT);
-  }
+  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
+                                                IntermediateVT,
+                                                NumIntermediates, RegisterVT);
   unsigned NumElements = ValueVT.getVectorNumElements();
 
   assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
   NumParts = NumRegs; // Silence a compiler warning.
   assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
 
-  // Convert the vector to the appropiate type if necessary.
-  unsigned DestVectorNoElts =
-      NumIntermediates *
-      (IntermediateVT.isVector() ? IntermediateVT.getVectorNumElements() : 1);
-  EVT BuiltVectorTy = EVT::getVectorVT(
-      *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
-  if (Val.getValueType() != BuiltVectorTy)
-    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
-
   // Split the vector into intermediate operands.
   SmallVector<SDValue, 8> Ops(NumIntermediates);
   for (unsigned i = 0; i != NumIntermediates; ++i) {
@@ -671,31 +610,22 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
   }
 }
 
-RegsForValue::RegsForValue() { IsABIMangled = false; }
+RegsForValue::RegsForValue() {}
 
 RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
-                           EVT valuevt, bool IsABIMangledValue)
-    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
-      RegCount(1, regs.size()), IsABIMangled(IsABIMangledValue) {}
+                           EVT valuevt)
+    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
 
 RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
-                           const DataLayout &DL, unsigned Reg, Type *Ty,
-                           bool IsABIMangledValue) {
+                           const DataLayout &DL, unsigned Reg, Type *Ty) {
   ComputeValueVTs(TLI, DL, Ty, ValueVTs);
-  IsABIMangled = IsABIMangledValue;
 
   for (EVT ValueVT : ValueVTs) {
-    unsigned NumRegs = IsABIMangledValue
-                           ? TLI.getNumRegistersForCallingConv(Context, ValueVT)
-                           : TLI.getNumRegisters(Context, ValueVT);
-    MVT RegisterVT = IsABIMangledValue
-                         ? TLI.getRegisterTypeForCallingConv(Context, ValueVT)
-                         : TLI.getRegisterType(Context, ValueVT);
+    unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
+    MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
     for (unsigned i = 0; i != NumRegs; ++i)
       Regs.push_back(Reg + i);
     RegVTs.push_back(RegisterVT);
-    RegCount.push_back(NumRegs);
     Reg += NumRegs;
   }
 }
@@ -716,10 +646,8 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
     // Copy the legal parts from the registers.
     EVT ValueVT = ValueVTs[Value];
-    unsigned NumRegs = RegCount[Value];
-    MVT RegisterVT = IsABIMangled
-                         ? TLI.getRegisterTypeForCallingConv(RegVTs[Value])
-                         : RegVTs[Value];
+    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
+    MVT RegisterVT = RegVTs[Value];
 
     Parts.resize(NumRegs);
     for (unsigned i = 0; i != NumRegs; ++i) {
@@ -814,11 +742,9 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
   unsigned NumRegs = Regs.size();
   SmallVector<SDValue, 8> Parts(NumRegs);
   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
-    unsigned NumParts = RegCount[Value];
-
-    MVT RegisterVT = IsABIMangled
-                         ? TLI.getRegisterTypeForCallingConv(RegVTs[Value])
-                         : RegVTs[Value];
+    EVT ValueVT = ValueVTs[Value];
+    unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
+    MVT RegisterVT = RegVTs[Value];
 
     if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
       ExtendKind = ISD::ZERO_EXTEND;
@@ -1041,16 +967,10 @@ SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
   if (It != FuncInfo.ValueMap.end()) {
     unsigned InReg = It->second;
 
-    bool IsABIRegCopy =
-        V && ((isa<CallInst>(V) &&
-               !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
-              isa<ReturnInst>(V));
-
     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
-                     DAG.getDataLayout(), InReg, Ty, IsABIRegCopy);
+                     DAG.getDataLayout(), InReg, Ty);
     SDValue Chain = DAG.getEntryNode();
-    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
-                                 V);
+    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
     resolveDanglingDebugInfo(V, Result);
   }
@@ -1237,13 +1157,8 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
   // If this is an instruction which fast-isel has deferred, select it now.
   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
-
-    bool IsABIRegCopy =
-        V && ((isa<CallInst>(V) &&
-               !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
-              isa<ReturnInst>(V));
-
     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
-                     Inst->getType(), IsABIRegCopy);
+                     Inst->getType());
     SDValue Chain = DAG.getEntryNode();
     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
   }
@@ -1471,12 +1386,12 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
 
-        unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, VT);
-        MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, VT);
+        unsigned NumParts = TLI.getNumRegisters(Context, VT);
+        MVT PartVT = TLI.getRegisterType(Context, VT);
         SmallVector<SDValue, 4> Parts(NumParts);
         getCopyToParts(DAG, getCurSDLoc(),
                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
-                       &Parts[0], NumParts, PartVT, &I, ExtendKind, true);
+                       &Parts[0], NumParts, PartVT, &I, ExtendKind);
 
         // 'inreg' on function refers to return value
         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
@@ -7149,8 +7064,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
         SDLoc dl = getCurSDLoc();
 
         // Use the produced MatchedRegs object to
-        MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
-                                  CS.getInstruction());
+        MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl,
+                                  Chain, &Flag, CS.getInstruction());
         MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
                                          true, OpInfo.getMatchedOperand(), dl,
                                          DAG, AsmNodeOperands);
@@ -7766,10 +7681,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   } else {
     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
       EVT VT = RetTys[I];
-      MVT RegisterVT =
-          getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
-      unsigned NumRegs =
-          getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
+      MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
+      unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
       for (unsigned i = 0; i != NumRegs; ++i) {
         ISD::InputArg MyFlags;
         MyFlags.VT = RegisterVT;
@@ -7818,11 +7731,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
       SDValue Op = SDValue(Args[i].Node.getNode(),
                            Args[i].Node.getResNo() + Value);
       ISD::ArgFlagsTy Flags;
-
-      // Certain targets (such as MIPS), may have a different ABI alignment
-      // for a type depending on the context. Give the target a chance to
-      // specify the alignment it wants.
-      unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);
+      unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
 
       if (Args[i].IsZExt)
         Flags.setZExt();
@@ -7877,9 +7786,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
         Flags.setInConsecutiveRegs();
       Flags.setOrigAlign(OriginalAlignment);
 
-      MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
-      unsigned NumParts =
-          getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
+      MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
+      unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
       SmallVector<SDValue, 4> Parts(NumParts);
       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
@@ -7909,8 +7817,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
       }
 
       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
-                     CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind,
-                     true);
+                     CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
 
       for (unsigned j = 0; j != NumParts; ++j) {
         // if it isn't first piece, alignment must be 1
@@ -8010,14 +7917,12 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
     unsigned CurReg = 0;
     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
       EVT VT = RetTys[I];
-      MVT RegisterVT =
-          getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
-      unsigned NumRegs =
-          getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
+      MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
+      unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
 
       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
                                               NumRegs, RegisterVT, VT, nullptr,
-                                              AssertOp, true));
+                                              AssertOp));
       CurReg += NumRegs;
     }
@@ -8053,15 +7958,8 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  // If this is an InlineAsm we have to match the registers required, not the
-  // notional registers required by the type.
-  bool IsABIRegCopy =
-      V && ((isa<CallInst>(V) &&
-             !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
-            isa<ReturnInst>(V));
-
   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
-                   V->getType(), IsABIRegCopy);
+                   V->getType());
   SDValue Chain = DAG.getEntryNode();
 
   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
@@ -8304,12 +8202,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
       EVT VT = ValueVTs[Value];
       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
       ISD::ArgFlagsTy Flags;
-
-      // Certain targets (such as MIPS), may have a different ABI alignment
-      // for a type depending on the context. Give the target a chance to
-      // specify the alignment it wants.
-      unsigned OriginalAlignment =
-          TLI->getABIAlignmentForCallingConv(ArgTy, DL);
+      unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
 
       if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
         Flags.setZExt();
@@ -8371,10 +8264,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
       if (ArgCopyElisionCandidates.count(&Arg))
         Flags.setCopyElisionCandidate();
 
-      MVT RegisterVT =
-          TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
-      unsigned NumRegs =
-          TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
+      MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
+      unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
       for (unsigned i = 0; i != NumRegs; ++i) {
         ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
                               Idx-1, PartBase+i*RegisterVT.getStoreSize());
@@ -8481,10 +8372,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
     for (unsigned Val = 0; Val != NumValues; ++Val) {
       EVT VT = ValueVTs[Val];
-      MVT PartVT =
-          TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
-      unsigned NumParts =
-          TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
+      MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
+      unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
 
       // Even an apparant 'unused' swifterror argument needs to be returned. So
       // we do generate a copy for it that can be used on return from the
@@ -8497,8 +8386,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
         AssertOp = ISD::AssertZext;
 
       ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
-                                           PartVT, VT, nullptr, AssertOp,
-                                           true));
+                                           PartVT, VT, nullptr, AssertOp));
     }
 
     i += NumParts;
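The predicate deleted repeatedly in the hunks above is what decided whether a copy crossed an ABI boundary. Pulled out as a standalone sketch (the helper name is hypothetical; the logic is copied verbatim from the removed lines):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper restating the removed inline predicate: a value is
// an ABI register copy when it feeds a call or a return. Inline asm is
// excluded because its operands must match the constraint's physical
// registers, not the notional ABI registers for the type.
static bool isABIRegCopy(const Value *V) {
  return V && ((isa<CallInst>(V) &&
                !static_cast<const CallInst *>(V)->isInlineAsm()) ||
               isa<ReturnInst>(V));
}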

View File

@@ -973,28 +973,18 @@ struct RegsForValue {
   /// expanded value requires multiple registers.
   SmallVector<unsigned, 4> Regs;
 
-  /// This list holds the number of registers for each value.
-  SmallVector<unsigned, 4> RegCount;
-
-  /// Records if this value needs to be treated in an ABI dependant manner,
-  /// different to normal type legalization.
-  bool IsABIMangled;
-
   RegsForValue();
 
-  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
-               bool IsABIMangledValue = false);
+  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt);
 
   RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
-               const DataLayout &DL, unsigned Reg, Type *Ty,
-               bool IsABIMangledValue = false);
+               const DataLayout &DL, unsigned Reg, Type *Ty);
 
   /// Add the specified values to this one.
   void append(const RegsForValue &RHS) {
     ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
     RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
     Regs.append(RHS.Regs.begin(), RHS.Regs.end());
-    RegCount.push_back(RHS.Regs.size());
   }
 
   /// Emit a series of CopyFromReg nodes that copies from this value and returns

View File

@@ -835,7 +835,7 @@ SelectionDAGBuilder::LowerStatepoint(ImmutableStatepoint ISP,
     // completely and make statepoint call to return a tuple.
     unsigned Reg = FuncInfo.CreateRegs(RetTy);
     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
-                     DAG.getDataLayout(), Reg, RetTy, true);
+                     DAG.getDataLayout(), Reg, RetTy);
     SDValue Chain = DAG.getEntryNode();
 
     RFV.getCopyToRegs(ReturnValue, DAG, getCurSDLoc(), Chain, nullptr);

View File

@@ -1616,10 +1616,8 @@ void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
       VT = MinVT;
   }
 
-  unsigned NumParts =
-      TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
-  MVT PartVT =
-      TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);
+  unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
+  MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
 
   // 'inreg' on function refers to return value
   ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();

View File

@@ -54,22 +54,6 @@ static bool originalTypeIsF128(Type *Ty, const SDNode *CallNode) {
   return (ES && Ty->isIntegerTy(128) && isF128SoftLibCall(ES->getSymbol()));
 }
 
-/// Return true if the original type was vXfXX.
-static bool originalEVTTypeIsVectorFloat(EVT Ty) {
-  if (Ty.isVector() && Ty.getVectorElementType().isFloatingPoint())
-    return true;
-
-  return false;
-}
-
-/// Return true if the original type was vXfXX / vXfXX.
-static bool originalTypeIsVectorFloat(Type * Ty) {
-  if (Ty->isVectorTy() && Ty->isFPOrFPVectorTy())
-    return true;
-
-  return false;
-}
-
 MipsCCState::SpecialCallingConvType
 MipsCCState::getSpecialCallingConvForCallee(const SDNode *Callee,
                                             const MipsSubtarget &Subtarget) {
@@ -97,8 +81,8 @@ void MipsCCState::PreAnalyzeCallResultForF128(
   }
 }
 
-/// Identify lowered values that originated from f128 or float arguments and
-/// record this for use by RetCC_MipsN.
+/// Identify lowered values that originated from f128 arguments and record
+/// this for use by RetCC_MipsN.
 void MipsCCState::PreAnalyzeReturnForF128(
     const SmallVectorImpl<ISD::OutputArg> &Outs) {
   const MachineFunction &MF = getMachineFunction();
@@ -110,50 +94,26 @@ void MipsCCState::PreAnalyzeReturnForF128(
   }
 }
 
-/// Identify lower values that originated from vXfXX and record
-/// this.
-void MipsCCState::PreAnalyzeCallResultForVectorFloat(
-    const SmallVectorImpl<ISD::InputArg> &Ins,
-    const TargetLowering::CallLoweringInfo &CLI) {
-  for (unsigned i = 0; i < Ins.size(); ++i) {
-    OriginalRetWasFloatVector.push_back(
-        originalTypeIsVectorFloat(CLI.RetTy));
-  }
-}
-
-/// Identify lowered values that originated from vXfXX arguments and record
-/// this.
-void MipsCCState::PreAnalyzeReturnForVectorFloat(
-    const SmallVectorImpl<ISD::OutputArg> &Outs) {
-  for (unsigned i = 0; i < Outs.size(); ++i) {
-    ISD::OutputArg Out = Outs[i];
-    OriginalRetWasFloatVector.push_back(
-        originalEVTTypeIsVectorFloat(Out.ArgVT));
-  }
-}
-
-/// Identify lowered values that originated from f128, float and sret to vXfXX
-/// arguments and record this.
+/// Identify lowered values that originated from f128 arguments and record
+/// this.
 void MipsCCState::PreAnalyzeCallOperands(
     const SmallVectorImpl<ISD::OutputArg> &Outs,
     std::vector<TargetLowering::ArgListEntry> &FuncArgs,
     const SDNode *CallNode) {
   for (unsigned i = 0; i < Outs.size(); ++i) {
-    TargetLowering::ArgListEntry FuncArg = FuncArgs[Outs[i].OrigArgIndex];
-
-    OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg.Ty, CallNode));
-    OriginalArgWasFloat.push_back(FuncArg.Ty->isFloatingPointTy());
-    OriginalArgWasFloatVector.push_back(FuncArg.Ty->isVectorTy());
+    OriginalArgWasF128.push_back(
+        originalTypeIsF128(FuncArgs[Outs[i].OrigArgIndex].Ty, CallNode));
+    OriginalArgWasFloat.push_back(
+        FuncArgs[Outs[i].OrigArgIndex].Ty->isFloatingPointTy());
     CallOperandIsFixed.push_back(Outs[i].IsFixed);
   }
 }
 
-/// Identify lowered values that originated from f128, float and vXfXX arguments
-/// and record this.
+/// Identify lowered values that originated from f128 arguments and record
+/// this.
 void MipsCCState::PreAnalyzeFormalArgumentsForF128(
     const SmallVectorImpl<ISD::InputArg> &Ins) {
   const MachineFunction &MF = getMachineFunction();
   for (unsigned i = 0; i < Ins.size(); ++i) {
     Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
@@ -163,7 +123,6 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128(
     if (Ins[i].Flags.isSRet()) {
       OriginalArgWasF128.push_back(false);
       OriginalArgWasFloat.push_back(false);
-      OriginalArgWasFloatVector.push_back(false);
       continue;
     }
@@ -173,10 +132,5 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128(
     OriginalArgWasF128.push_back(
         originalTypeIsF128(FuncArg->getType(), nullptr));
     OriginalArgWasFloat.push_back(FuncArg->getType()->isFloatingPointTy());
-
-    // The MIPS vector ABI exhibits a corner case of sorts or quirk; if the
-    // first argument is actually an SRet pointer to a vector, then the next
-    // argument slot is $a2.
-    OriginalArgWasFloatVector.push_back(FuncArg->getType()->isVectorTy());
   }
 }

View File

@@ -45,33 +45,16 @@ private:
                               const SDNode *CallNode);
 
   /// Identify lowered values that originated from f128 arguments and record
-  /// this for use by RetCC_MipsN.
+  /// this.
   void
   PreAnalyzeFormalArgumentsForF128(const SmallVectorImpl<ISD::InputArg> &Ins);
 
-  void PreAnalyzeCallResultForVectorFloat(
-      const SmallVectorImpl<ISD::InputArg> &Ins,
-      const TargetLowering::CallLoweringInfo &CLI);
-
-  void PreAnalyzeFormalArgumentsForVectorFloat(
-      const SmallVectorImpl<ISD::InputArg> &Ins);
-
-  void
-  PreAnalyzeReturnForVectorFloat(const SmallVectorImpl<ISD::OutputArg> &Outs);
-
   /// Records whether the value has been lowered from an f128.
   SmallVector<bool, 4> OriginalArgWasF128;
 
   /// Records whether the value has been lowered from float.
   SmallVector<bool, 4> OriginalArgWasFloat;
 
-  /// Records whether the value has been lowered from a floating point vector.
-  SmallVector<bool, 4> OriginalArgWasFloatVector;
-
-  /// Records whether the return value has been lowered from a floating point
-  /// vector.
-  SmallVector<bool, 4> OriginalRetWasFloatVector;
-
   /// Records whether the value was a fixed argument.
   /// See ISD::OutputArg::IsFixed,
   SmallVector<bool, 4> CallOperandIsFixed;
@@ -95,7 +78,6 @@ public:
     CCState::AnalyzeCallOperands(Outs, Fn);
     OriginalArgWasF128.clear();
     OriginalArgWasFloat.clear();
-    OriginalArgWasFloatVector.clear();
    CallOperandIsFixed.clear();
   }
@@ -114,38 +96,31 @@ public:
     CCState::AnalyzeFormalArguments(Ins, Fn);
     OriginalArgWasFloat.clear();
     OriginalArgWasF128.clear();
-    OriginalArgWasFloatVector.clear();
   }
 
   void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                          CCAssignFn Fn,
                          const TargetLowering::CallLoweringInfo &CLI) {
     PreAnalyzeCallResultForF128(Ins, CLI);
-    PreAnalyzeCallResultForVectorFloat(Ins, CLI);
     CCState::AnalyzeCallResult(Ins, Fn);
     OriginalArgWasFloat.clear();
     OriginalArgWasF128.clear();
-    OriginalArgWasFloatVector.clear();
   }
 
   void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                      CCAssignFn Fn) {
     PreAnalyzeReturnForF128(Outs);
-    PreAnalyzeReturnForVectorFloat(Outs);
     CCState::AnalyzeReturn(Outs, Fn);
     OriginalArgWasFloat.clear();
     OriginalArgWasF128.clear();
-    OriginalArgWasFloatVector.clear();
   }
 
   bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
                    CCAssignFn Fn) {
     PreAnalyzeReturnForF128(ArgsFlags);
-    PreAnalyzeReturnForVectorFloat(ArgsFlags);
     bool Return = CCState::CheckReturn(ArgsFlags, Fn);
     OriginalArgWasFloat.clear();
     OriginalArgWasF128.clear();
-    OriginalArgWasFloatVector.clear();
     return Return;
   }
@@ -153,13 +128,6 @@ public:
   bool WasOriginalArgFloat(unsigned ValNo) {
     return OriginalArgWasFloat[ValNo];
   }
-
-  bool WasOriginalArgVectorFloat(unsigned ValNo) const {
-    return OriginalArgWasFloatVector[ValNo];
-  }
-
-  bool WasOriginalRetVectorFloat(unsigned ValNo) const {
-    return OriginalRetWasFloatVector[ValNo];
-  }
-
   bool IsCallOperandFixed(unsigned ValNo) { return CallOperandIsFixed[ValNo]; }
   SpecialCallingConvType getSpecialCallingConv() { return SpecialCallingConv; }
 };

View File

@@ -37,10 +37,6 @@ class CCIfOrigArgWasF128<CCAction A>
 class CCIfArgIsVarArg<CCAction A>
     : CCIf<"!static_cast<MipsCCState *>(&State)->IsCallOperandFixed(ValNo)", A>;
 
-/// Match if the return was a floating point vector.
-class CCIfOrigArgWasNotVectorFloat<CCAction A>
-    : CCIf<"!static_cast<MipsCCState *>(&State)"
-               "->WasOriginalRetVectorFloat(ValNo)", A>;
-
 /// Match if the special calling conv is the specified value.
 class CCIfSpecialCallingConv<string CC, CCAction A>
@@ -97,10 +93,8 @@ def RetCC_MipsO32 : CallingConv<[
   // Promote i1/i8/i16 return values to i32.
   CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
 
-  // i32 are returned in registers V0, V1, A0, A1, unless the original return
-  // type was a vector of floats.
-  CCIfOrigArgWasNotVectorFloat<CCIfType<[i32],
-                                        CCAssignToReg<[V0, V1, A0, A1]>>>,
+  // i32 are returned in registers V0, V1, A0, A1
+  CCIfType<[i32], CCAssignToReg<[V0, V1, A0, A1]>>,
 
   // f32 are returned in registers F0, F2
   CCIfType<[f32], CCAssignToReg<[F0, F2]>>,
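For readers unfamiliar with the CCIf mechanism: TableGen pastes the condition string of a guard like the removed CCIfOrigArgWasNotVectorFloat directly into the generated calling-convention function. A hand-written approximation of what that generated C++ looks like for the removed rule (this is a sketch inside the Mips backend, not the actual TableGen output, and the fragment name is invented):

#include "MipsCCState.h"
using namespace llvm;

// Approximation of the generated code for the removed guard wrapped
// around CCIfType<[i32], CCAssignToReg<[V0, V1, A0, A1]>>.
static bool RetCC_MipsO32_I32Fragment(unsigned ValNo, MVT ValVT, MVT LocVT,
                                      CCValAssign::LocInfo LocInfo,
                                      ISD::ArgFlagsTy ArgFlags,
                                      CCState &State) {
  // The CCIf<...> condition string is emitted verbatim by TableGen.
  if (!static_cast<MipsCCState *>(&State)->WasOriginalRetVectorFloat(ValNo)) {
    if (LocVT == MVT::i32) {
      static const MCPhysReg RegList[] = {Mips::V0, Mips::V1, Mips::A0,
                                          Mips::A1};
      if (unsigned Reg = State.AllocateReg(RegList)) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false; // assignment made; stop matching
      }
    }
  }
  return true; // no match; fall through to the next rule
}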

View File

@@ -71,48 +71,6 @@ static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
   return true;
 }
 
-// The MIPS MSA ABI passes vector arguments in the integer register set.
-// The number of integer registers used is dependant on the ABI used.
-MVT MipsTargetLowering::getRegisterTypeForCallingConv(MVT VT) const {
-  if (VT.isVector() && Subtarget.hasMSA())
-    return Subtarget.isABI_O32() ? MVT::i32 : MVT::i64;
-  return MipsTargetLowering::getRegisterType(VT);
-}
-
-MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
-                                                      EVT VT) const {
-  if (VT.isVector()) {
-    if (Subtarget.isABI_O32()) {
-      return MVT::i32;
-    } else {
-      return (VT.getSizeInBits() == 32) ? MVT::i32 : MVT::i64;
-    }
-  }
-  return MipsTargetLowering::getRegisterType(Context, VT);
-}
-
-unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
-                                                           EVT VT) const {
-  if (VT.isVector())
-    return std::max((VT.getSizeInBits() / (Subtarget.isABI_O32() ? 32 : 64)),
-                    1U);
-  return MipsTargetLowering::getNumRegisters(Context, VT);
-}
-
-unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
-    LLVMContext &Context, EVT VT, EVT &IntermediateVT,
-    unsigned &NumIntermediates, MVT &RegisterVT) const {
-  // Break down vector types to either 2 i64s or 4 i32s.
-  RegisterVT = getRegisterTypeForCallingConv(Context, VT) ;
-  IntermediateVT = RegisterVT;
-  NumIntermediates = VT.getSizeInBits() < RegisterVT.getSizeInBits()
-                         ? VT.getVectorNumElements()
-                         : VT.getSizeInBits() / RegisterVT.getSizeInBits();
-  return NumIntermediates;
-}
-
 SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
   MipsFunctionInfo *FI = DAG.getMachineFunction().getInfo<MipsFunctionInfo>();
   return DAG.getRegister(FI->getGlobalBaseReg(), Ty);
@@ -2557,11 +2515,6 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
 //       yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
 //       not used, it must be shadowed. If only A3 is available, shadow it and
 //       go to stack.
-//  vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
-//  vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
-//          with the remainder spilled to the stack.
-//  vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
-//          spilling the remainder to the stack.
 //
 //  For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
 //===----------------------------------------------------------------------===//
@@ -2573,13 +2526,8 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
                        State.getMachineFunction().getSubtarget());
 
   static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
-
-  const MipsCCState * MipsState = static_cast<MipsCCState *>(&State);
-
   static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
 
-  static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
-
   // Do not process byval args here.
   if (ArgFlags.isByVal())
     return true;
@@ -2617,26 +2565,8 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
                          State.getFirstUnallocated(F32Regs) != ValNo;
   unsigned OrigAlign = ArgFlags.getOrigAlign();
   bool isI64 = (ValVT == MVT::i32 && OrigAlign == 8);
-  bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
 
-  // The MIPS vector ABI for floats passes them in a pair of registers
-  if (ValVT == MVT::i32 && isVectorFloat) {
-    // This is the start of an vector that was scalarized into an unknown number
-    // of components. It doesn't matter how many there are. Allocate one of the
-    // notional 8 byte aligned registers which map onto the argument stack, and
-    // shadow the register lost to alignment requirements.
-    if (ArgFlags.isSplit()) {
-      Reg = State.AllocateReg(FloatVectorIntRegs);
-      if (Reg == Mips::A2)
-        State.AllocateReg(Mips::A1);
-      else if (Reg == 0)
-        State.AllocateReg(Mips::A3);
-    } else {
-      // If we're an intermediate component of the split, we can just attempt to
-      // allocate a register directly.
-      Reg = State.AllocateReg(IntRegs);
-    }
-  } else if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
+  if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
     Reg = State.AllocateReg(IntRegs);
     // If this is the first part of an i64 arg,
     // the allocated register must be either A0 or A2.
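To make the removed breakdown arithmetic concrete: under O32 (32-bit GPRs) a v4f32 is 128 bits, 128 >= 32, so NumIntermediates = 128 / 32 = 4 and the vector travels as four i32 pieces; under N64 the same vector becomes two i64 pieces. A small, self-contained restatement of that arithmetic (plain C++, detached from the deleted member function):

#include <cstdio>

// Re-statement of the deleted NumIntermediates computation: a vector of
// VecBits total bits is carried in GPR-sized integer pieces, except that
// a vector smaller than one GPR gets one piece per element.
static unsigned numIntermediates(unsigned VecBits, unsigned VecElts,
                                 unsigned GPRBits) {
  return VecBits < GPRBits ? VecElts : VecBits / GPRBits;
}

int main() {
  std::printf("v4f32 on O32: %u x i32\n", numIntermediates(128, 4, 32)); // 4
  std::printf("v4f32 on N64: %u x i64\n", numIntermediates(128, 4, 64)); // 2
  std::printf("v2i64 on N64: %u x i64\n", numIntermediates(128, 2, 64)); // 2
}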

View File

@@ -248,33 +248,6 @@ namespace llvm {
     bool isCheapToSpeculateCttz() const override;
     bool isCheapToSpeculateCtlz() const override;
 
-    /// Return the register type for a given MVT, ensuring vectors are treated
-    /// as a series of gpr sized integers.
-    virtual MVT getRegisterTypeForCallingConv(MVT VT) const override;
-
-    /// Return the register type for a given MVT, ensuring vectors are treated
-    /// as a series of gpr sized integers.
-    virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
-                                              EVT VT) const override;
-
-    /// Return the number of registers for a given MVT, ensuring vectors are
-    /// treated as a series of gpr sized integers.
-    virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
-                                                   EVT VT) const override;
-
-    /// Break down vectors to the correct number of gpr sized integers.
-    virtual unsigned getVectorTypeBreakdownForCallingConv(
-        LLVMContext &Context, EVT VT, EVT &IntermediateVT,
-        unsigned &NumIntermediates, MVT &RegisterVT) const override;
-
-    /// Return the correct alignment for the current calling convention.
-    virtual unsigned
-    getABIAlignmentForCallingConv(Type *ArgTy, DataLayout DL) const override {
-      if (ArgTy->isVectorTy())
-        return std::min(DL.getABITypeAlignment(ArgTy), 8U);
-      return DL.getABITypeAlignment(ArgTy);
-    }
-
     ISD::NodeType getExtendForAtomicOps() const override {
       return ISD::SIGN_EXTEND;
     }
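The deleted getABIAlignmentForCallingConv override capped vector argument alignment at 8 bytes: a normally 16-byte-aligned v4i32, for example, would claim only 8-byte alignment in the O32 argument area, while scalars kept their DataLayout alignment. A trivial standalone restatement of that cap (illustrative values in the comments):

#include <algorithm>

// Restatement of the deleted MIPS override: vector arguments never claim
// more than 8-byte alignment in the argument area; everything else keeps
// its normal ABI alignment.
static unsigned abiAlignForCallingConv(bool IsVector, unsigned ABIAlign) {
  return IsVector ? std::min(ABIAlign, 8u) : ABIAlign;
}

// e.g. abiAlignForCallingConv(/*IsVector=*/true,  /*ABIAlign=*/16) == 8
//      abiAlignForCallingConv(/*IsVector=*/false, /*ABIAlign=*/16) == 16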

View File

@@ -286,9 +286,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
   DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n"
                << "spOffset   : " << spOffset << "\n"
-               << "stackSize  : " << stackSize << "\n"
-               << "alignment  : "
-               << MF.getFrameInfo().getObjectAlignment(FrameIndex) << "\n");
+               << "stackSize  : " << stackSize << "\n");
 
   eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset);
 }

File diff suppressed because it is too large.

View File

@@ -8,14 +8,10 @@ entry:
 ; MIPS32: clz $2, $4
 ; MIPS32: clz $3, $5
 
-; MIPS64-DAG: dsrl $[[A0:[0-9]+]], $4, 32
-; MIPS64-DAG: sll $[[A1:[0-9]+]], $[[A0]], 0
-; MIPS64-DAG: clz $[[R0:[0-9]+]], $[[A1]]
-; MIPS64-DAG: dsll $[[R1:[0-9]+]], $[[R0]], 32
-; MIPS64-DAG: sll $[[A2:[0-9]+]], $4, 0
-; MIPS64-DAG: clz $[[R2:[0-9]+]], $[[A2]]
-; MIPS64-DAG: dext $[[R3:[0-9]+]], $[[R2]], 0, 32
-; MIPS64-DAG: or $2, $[[R3]], $[[R1]]
+; MIPS64-DAG: sll $[[A0:[0-9]+]], $4, 0
+; MIPS64-DAG: clz $2, $[[A0]]
+; MIPS64-DAG: sll $[[A1:[0-9]+]], $5, 0
+; MIPS64-DAG: clz $3, $[[A1]]
 
 %ret = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 true)
 ret <2 x i32> %ret

View File

@@ -24,17 +24,14 @@ entry:
 ; MIPS64-DAG: and $[[R2:[0-9]+]], $[[R1]], $[[R0]]
 ; MIPS64-DAG: clz $[[R3:[0-9]+]], $[[R2]]
 ; MIPS64-DAG: addiu $[[R4:[0-9]+]], $zero, 32
-; MIPS64-DAG: subu $[[R5:[0-9]+]], $[[R4]], $[[R3]]
-; MIPS64-DAG: dsrl $[[R6:[0-9]+]], $4, 32
-; MIPS64-DAG: sll $[[R7:[0-9]+]], $[[R6]], 0
-; MIPS64-DAG: dext $[[R8:[0-9]+]], $[[R5]], 0, 32
-; MIPS64-DAG: addiu $[[R9:[0-9]+]], $[[R7]], -1
-; MIPS64-DAG: not $[[R10:[0-9]+]], $[[R7]]
-; MIPS64-DAG: and $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; MIPS64-DAG: clz $[[R12:[0-9]+]], $[[R11]]
-; MIPS64-DAG: subu $[[R13:[0-9]+]], $[[R4]], $[[R12]]
-; MIPS64-DAG: dsll $[[R14:[0-9]+]], $[[R13]], 32
-; MIPS64-DAG: or $2, $[[R8]], $[[R14]]
+; MIPS64-DAG: subu $2, $[[R4]], $[[R3]]
+; MIPS64-DAG: sll $[[A1:[0-9]+]], $5, 0
+; MIPS64-DAG: addiu $[[R5:[0-9]+]], $[[A1]], -1
+; MIPS64-DAG: not $[[R6:[0-9]+]], $[[A1]]
+; MIPS64-DAG: and $[[R7:[0-9]+]], $[[R6]], $[[R5]]
+; MIPS64-DAG: clz $[[R8:[0-9]+]], $[[R7]]
+; MIPS64-DAG: jr $ra
+; MIPS64-DAG: subu $3, $[[R4]], $[[R8]]
 
 %ret = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %x, i1 true)
 ret <2 x i32> %ret

View File

@@ -128,11 +128,8 @@ entry:
 
 ; CHECK-LABEL: call_f2:
 ; CHECK: call16(f2)
-; CHECK: addiu $4, $sp, [[O0:[0-9]+]]
-; CHECK-DAG: lwc1 $f[[F0:[0-9]]], [[O0]]($sp)
-; CHECK-DAG: lwc1 $f[[F1:[0-9]]], 20($sp)
-; CHECK: add.s $f0, $f[[F0]], $f[[F1]]
+; CHECK-NOT: lwc1
+; CHECK: add.s $[[R2:[a-z0-9]+]], $[[R0:[a-z0-9]+]], $[[R1:[a-z0-9]+]]
 }
@@ -146,13 +143,12 @@ entry:
 
 ; CHECK-LABEL: call_d2:
 ; CHECK: call16(d2)
-; CHECK: addiu $4, $sp, [[O0:[0-9]+]]
-; CHECK-DAG: ldc1 $f[[F0:[0-9]+]], 24($sp)
-; CHECK-DAG: ldc1 $f[[F1:[0-9]+]], [[O0]]($sp)
-; CHECK: add.d $f0, $f[[F1]], $f[[F0]]
+; CHECK-NOT: ldc1
+; CHECK: add.d $[[R2:[a-z0-9]+]], $[[R0:[a-z0-9]+]], $[[R1:[a-z0-9]+]]
 }
 
 ; Check that function returns vector on stack in cases when vector can't be
 ; returned in registers. Also check that vector is placed on stack starting
 ; from the address in register $4.
@@ -183,12 +179,11 @@ entry:
   ret <4 x float> %vecins4
 
 ; CHECK-LABEL: return_f4:
-; CHECK-DAG: lwc1 $f[[R0:[0-9]+]], 16($sp)
-; CHECK-DAG: swc1 $f[[R0]], 12($4)
+; CHECK-DAG: lwc1 $[[R0:[a-z0-9]+]], 16($sp)
+; CHECK-DAG: swc1 $[[R0]], 12($4)
 ; CHECK-DAG: sw $7, 8($4)
 ; CHECK-DAG: sw $6, 4($4)
 ; CHECK-DAG: sw $5, 0($4)
 }
@@ -232,8 +227,8 @@ entry:
   ret <2 x float> %vecins2
 
 ; CHECK-LABEL: return_f2:
-; CHECK-DAG: sw $5, 0($4)
-; CHECK-DAG: sw $6, 4($4)
+; CHECK: mov.s $f0, $f12
+; CHECK: mov.s $f2, $f14
 }
@@ -244,10 +239,6 @@ entry:
   ret <2 x double> %vecins2
 
 ; CHECK-LABEL: return_d2:
-; CHECK-DAG: ldc1 $f[[F0:[0-9]]], 16($sp)
-; CHECK-DAG: sdc1 $f[[F0]], 8($4)
-; CHECK-DAG: mtc1 $6, $f[[F1:[0-9]+]]
-; CHECK-DAG: mtc1 $7, $f
-; CHECK-DAG: sdc1 $f[[F0]], 0($4)
+; CHECK: mov.d $f0, $f12
+; CHECK: mov.d $f2, $f14
 }