First chunk of actually generating vector code for packed types. These
changes allow us to generate the following code:

_foo:
        li r2, 0
        lvx v0, r2, r3
        vaddfp v0, v0, v0
        stvx v0, r2, r3
        blr

for this llvm:

void %foo(<4 x float>* %a) {
entry:
        %tmp1 = load <4 x float>* %a
        %tmp2 = add <4 x float> %tmp1, %tmp1
        store <4 x float> %tmp2, <4 x float>* %a
        ret void
}

llvm-svn: 24534
This commit is contained in:
Nate Begeman 2005-11-30 08:22:07 +00:00
parent 6db615df14
commit 1064d6ec43
3 changed files with 80 additions and 17 deletions

View File

@ -45,10 +45,8 @@ namespace MVT { // MVT = Machine Value Types
isVoid = 12, // This has no value
Vector = 13, // This is an abstract vector type, which will
// be refined into a target vector type, or
// scalarized.
// These are 128 bit vectors of varying packed types
// be expanded into a target vector type, or scalars
// if no matching vector type is available.
v16i8 = 14, // 16 x i8
v8i16 = 15, // 8 x i16
v4i32 = 16, // 4 x i32
@ -70,6 +68,21 @@ namespace MVT { // MVT = Machine Value Types
return (VT >= v16i8 && VT <= v2f64);
}
/// getVectorType - Returns the ValueType that represents a vector NumElements
/// in length, where each element is of type VT.  If there is no ValueType
/// that represents this vector, a ValueType of Other is returned.
///
static inline ValueType getVectorType(ValueType VT, unsigned NumElements) {
  // Only 4 x f32 currently has a dedicated vector ValueType; every other
  // combination reports Other so callers can fall back to expansion.
  if (VT == MVT::f32 && NumElements == 4)
    return MVT::v4f32;
  return MVT::Other;
}
static inline unsigned getSizeInBits(ValueType VT) {
switch (VT) {
default: assert(0 && "ValueType has no known size!");

View File

@ -520,12 +520,19 @@ void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
const PackedType *PTy = cast<PackedType>(Ty);
unsigned NumElements = PTy->getNumElements();
MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);
// Immediately scalarize packed types containing only one element, so that
// the Legalize pass does not have to deal with them.
// the Legalize pass does not have to deal with them. Similarly, if the
// abstract vector is going to turn into one that the target natively
// supports, generate that type now so that Legalize doesn't have to deal
// with that either. These steps ensure that Legalize only has to handle
// vector types in its Expand case.
unsigned Opc = MVT::isFloatingPoint(PVT) ? FPOp : IntOp;
if (NumElements == 1) {
unsigned Opc = MVT::isFloatingPoint(PVT) ? FPOp : IntOp;
setValue(&I, DAG.getNode(Opc, PVT, Op1, Op2));
} else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
setValue(&I, DAG.getNode(Opc, TVT, Op1, Op2));
} else {
SDOperand Num = DAG.getConstant(NumElements, MVT::i32);
SDOperand Typ = DAG.getValueType(PVT);
@ -777,11 +784,14 @@ void SelectionDAGLowering::visitLoad(LoadInst &I) {
const PackedType *PTy = cast<PackedType>(Ty);
unsigned NumElements = PTy->getNumElements();
MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);
// Immediately scalarize packed types containing only one element, so that
// the Legalize pass does not have to deal with them.
if (NumElements == 1) {
L = DAG.getLoad(PVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
} else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
L = DAG.getLoad(TVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
} else {
L = DAG.getVecLoad(NumElements, PVT, Root, Ptr,
DAG.getSrcValue(I.getOperand(0)));

View File

@ -73,6 +73,11 @@ namespace {
/// load/store instruction, and return true if it should be an indexed [r+r]
/// operation.
bool SelectAddr(SDOperand Addr, SDOperand &Op1, SDOperand &Op2);
/// SelectAddrIndexed - Given the specified address, force it to be
/// represented as an indexed [r+r] operation, rather than possibly
/// returning [r+imm] as SelectAddr may.
void SelectAddrIndexed(SDOperand Addr, SDOperand &Op1, SDOperand &Op2);
SDOperand BuildSDIVSequence(SDNode *N);
SDOperand BuildUDIVSequence(SDNode *N);
@ -428,7 +433,7 @@ bool PPCDAGToDAGISel::SelectAddr(SDOperand Addr, SDOperand &Op1,
}
}
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Addr)) {
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Addr)) {
Op1 = getI32Imm(0);
Op2 = CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32);
return false;
@ -445,6 +450,26 @@ bool PPCDAGToDAGISel::SelectAddr(SDOperand Addr, SDOperand &Op1,
return false;
}
/// SelectAddrIndexed - Given the specified address, force it to be
/// represented as an indexed [r+r] operation, rather than possibly
/// returning [r+imm] as SelectAddr may.
void PPCDAGToDAGISel::SelectAddrIndexed(SDOperand Addr, SDOperand &Op1,
                                        SDOperand &Op2) {
  // An add node already supplies both register operands of the [r+r] form.
  if (Addr.getOpcode() == ISD::ADD) {
    Op1 = Select(Addr.getOperand(0));
    Op2 = Select(Addr.getOperand(1));
    return;
  }

  // Otherwise materialize a zero index register with 'li 0' so the result
  // is always an indexed [r+r] operation, never [r+imm].
  Op1 = CurDAG->getTargetNode(PPC::LI, MVT::i32, getI32Imm(0));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Addr))
    Op2 = CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32);
  else
    Op2 = Select(Addr);
}
/// SelectCC - Select a comparison of the specified values with the specified
/// condition code, returning the CR# of the expression.
SDOperand PPCDAGToDAGISel::SelectCC(SDOperand LHS, SDOperand RHS,
@ -916,9 +941,8 @@ SDOperand PPCDAGToDAGISel::Select(SDOperand Op) {
}
}
CurDAG->SelectNodeTo(N, Ty == MVT::f64 ? PPC::FADD : PPC::FADDS, Ty,
Select(N->getOperand(0)), Select(N->getOperand(1)));
return SDOperand(N, 0);
// Other cases are autogenerated.
break;
}
case ISD::FSUB: {
MVT::ValueType Ty = N->getValueType(0);
@ -942,10 +966,9 @@ SDOperand PPCDAGToDAGISel::Select(SDOperand Op) {
return SDOperand(N, 0);
}
}
CurDAG->SelectNodeTo(N, Ty == MVT::f64 ? PPC::FSUB : PPC::FSUBS, Ty,
Select(N->getOperand(0)),
Select(N->getOperand(1)));
return SDOperand(N, 0);
// Other cases are autogenerated.
break;
}
case ISD::SDIV: {
// FIXME: since this depends on the setting of the carry flag from the srawi
@ -1074,10 +1097,17 @@ SDOperand PPCDAGToDAGISel::Select(SDOperand Op) {
case ISD::ZEXTLOAD:
case ISD::SEXTLOAD: {
SDOperand Op1, Op2;
bool isIdx = SelectAddr(N->getOperand(1), Op1, Op2);
// If this is a vector load, then force this to be indexed addressing, since
// altivec does not have immediate offsets for loads.
bool isIdx = true;
if (N->getOpcode() == ISD::LOAD && MVT::isVector(N->getValueType(0))) {
SelectAddrIndexed(N->getOperand(1), Op1, Op2);
} else {
isIdx = SelectAddr(N->getOperand(1), Op1, Op2);
}
MVT::ValueType TypeBeingLoaded = (N->getOpcode() == ISD::LOAD) ?
N->getValueType(0) : cast<VTSDNode>(N->getOperand(3))->getVT();
unsigned Opc;
switch (TypeBeingLoaded) {
default: N->dump(); assert(0 && "Cannot load this type!");
@ -1093,6 +1123,7 @@ SDOperand PPCDAGToDAGISel::Select(SDOperand Op) {
case MVT::i32: Opc = isIdx ? PPC::LWZX : PPC::LWZ; break;
case MVT::f32: Opc = isIdx ? PPC::LFSX : PPC::LFS; break;
case MVT::f64: Opc = isIdx ? PPC::LFDX : PPC::LFD; break;
case MVT::v4f32: Opc = PPC::LVX; break;
}
// If this is an f32 -> f64 load, emit the f32 load, then use an 'extending
@ -1119,7 +1150,15 @@ SDOperand PPCDAGToDAGISel::Select(SDOperand Op) {
case ISD::TRUNCSTORE:
case ISD::STORE: {
SDOperand AddrOp1, AddrOp2;
bool isIdx = SelectAddr(N->getOperand(2), AddrOp1, AddrOp2);
// If this is a vector store, then force this to be indexed addressing,
// since altivec does not have immediate offsets for stores.
bool isIdx = true;
if (N->getOpcode() == ISD::STORE &&
MVT::isVector(N->getOperand(1).getValueType())) {
SelectAddrIndexed(N->getOperand(2), AddrOp1, AddrOp2);
} else {
isIdx = SelectAddr(N->getOperand(2), AddrOp1, AddrOp2);
}
unsigned Opc;
if (N->getOpcode() == ISD::STORE) {
@ -1128,6 +1167,7 @@ SDOperand PPCDAGToDAGISel::Select(SDOperand Op) {
case MVT::i32: Opc = isIdx ? PPC::STWX : PPC::STW; break;
case MVT::f64: Opc = isIdx ? PPC::STFDX : PPC::STFD; break;
case MVT::f32: Opc = isIdx ? PPC::STFSX : PPC::STFS; break;
case MVT::v4f32: Opc = PPC::STVX;
}
} else { //ISD::TRUNCSTORE
switch(cast<VTSDNode>(N->getOperand(4))->getVT()) {