[X86] Improve the lowering of BITCAST from MVT::f64 to MVT::v4i16/MVT::v8i8.

This patch teaches the x86 backend how to efficiently lower ISD::BITCAST dag
nodes from MVT::f64 to MVT::v4i16 (and vice versa), and from MVT::f64 to
MVT::v8i8 (and vice versa).

This patch extends the logic from revision 208107 to also handle MVT::v4i16
and MVT::v8i8. Also, this patch correctly propagates Undef values when
performing the widening of a vector (example: when widening from v2i32 to
v4i32, the upper 64 bits of the resulting vector are 'undef').

llvm-svn: 209451
This commit is contained in:
Andrea Di Biagio 2014-05-22 16:21:39 +00:00
parent dad4acbc52
commit c8dd1ad85b
4 changed files with 195 additions and 100 deletions

View File

@ -1040,6 +1040,8 @@ void X86TargetLowering::resetOperationActions() {
setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal); setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
setOperationAction(ISD::BITCAST, MVT::v2i32, Custom); setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
} }
if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) { if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
@ -14276,19 +14278,31 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
MVT SrcVT = Op.getOperand(0).getSimpleValueType(); MVT SrcVT = Op.getOperand(0).getSimpleValueType();
MVT DstVT = Op.getSimpleValueType(); MVT DstVT = Op.getSimpleValueType();
if (SrcVT == MVT::v2i32) { if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
assert(Subtarget->hasSSE2() && "Requires at least SSE2!"); assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
if (DstVT != MVT::f64) if (DstVT != MVT::f64)
// This conversion needs to be expanded. // This conversion needs to be expanded.
return SDValue(); return SDValue();
SDValue InVec = Op->getOperand(0);
SDLoc dl(Op); SDLoc dl(Op);
SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, unsigned NumElts = SrcVT.getVectorNumElements();
Op->getOperand(0), DAG.getIntPtrConstant(0)); EVT SVT = SrcVT.getVectorElementType();
SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
Op->getOperand(0), DAG.getIntPtrConstant(1)); // Widen the vector in input in the case of MVT::v2i32.
SDValue Elts[] = {Elt0, Elt1, Elt0, Elt0}; // Example: from MVT::v2i32 to MVT::v4i32.
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Elts); SmallVector<SDValue, 16> Elts;
for (unsigned i = 0, e = NumElts; i != e; ++i)
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
DAG.getIntPtrConstant(i)));
// Explicitly mark the extra elements as Undef.
SDValue Undef = DAG.getUNDEF(SVT);
for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
Elts.push_back(Undef);
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV); SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64, return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
DAG.getIntPtrConstant(0)); DAG.getIntPtrConstant(0));
@ -14758,17 +14772,23 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
EVT DstVT = N->getValueType(0); EVT DstVT = N->getValueType(0);
EVT SrcVT = N->getOperand(0)->getValueType(0); EVT SrcVT = N->getOperand(0)->getValueType(0);
if (SrcVT == MVT::f64 && DstVT == MVT::v2i32) { if (SrcVT != MVT::f64 ||
SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
MVT::v2f64, N->getOperand(0)); return;
SDValue ToV4I32 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Expanded);
SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, unsigned NumElts = DstVT.getVectorNumElements();
ToV4I32, DAG.getIntPtrConstant(0)); EVT SVT = DstVT.getVectorElementType();
SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
ToV4I32, DAG.getIntPtrConstant(1)); SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
SDValue Elts[] = {Elt0, Elt1}; MVT::v2f64, N->getOperand(0));
Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Elts)); SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
}
SmallVector<SDValue, 8> Elts;
for (unsigned i = 0, e = NumElts; i != e; ++i)
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
ToVecInt, DAG.getIntPtrConstant(i)));
Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
} }
} }
} }

View File

@ -1,80 +0,0 @@
; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
define double @test1(double %A) {
%1 = bitcast double %A to <2 x i32>
%add = add <2 x i32> %1, <i32 3, i32 5>
%2 = bitcast <2 x i32> %add to double
ret double %2
}
; FIXME: Ideally we should be able to fold the entire body of @test1 into a
; single paddd instruction. At the moment we produce the sequence
; pshufd+paddq+pshufd.
; CHECK-LABEL: test1
; CHECK-NOT: movsd
; CHECK: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK-NEXT: ret
define double @test2(double %A, double %B) {
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%add = add <2 x i32> %1, %2
%3 = bitcast <2 x i32> %add to double
ret double %3
}
; FIXME: Ideally we should be able to fold the entire body of @test2 into a
; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
; sequence pshufd+pshufd+paddq+pshufd.
; CHECK-LABEL: test2
; CHECK-NOT: movsd
; CHECK: pshufd
; CHECK-NEXT: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK-NEXT: ret
define i64 @test3(i64 %A) {
%1 = bitcast i64 %A to <2 x float>
%add = fadd <2 x float> %1, <float 3.0, float 5.0>
%2 = bitcast <2 x float> %add to i64
ret i64 %2
}
; CHECK-LABEL: test3
; CHECK-NOT: pshufd
; CHECK: addps
; CHECK-NOT: pshufd
; CHECK: ret
define i64 @test4(i64 %A) {
%1 = bitcast i64 %A to <2 x i32>
%add = add <2 x i32> %1, <i32 3, i32 5>
%2 = bitcast <2 x i32> %add to i64
ret i64 %2
}
; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
; Ideally, we should fold that sequence into a single paddd.
; CHECK-LABEL: test4
; CHECK: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK: ret
define double @test5(double %A) {
%1 = bitcast double %A to <2 x float>
%add = fadd <2 x float> %1, <float 3.0, float 5.0>
%2 = bitcast <2 x float> %add to double
ret double %2
}
; CHECK-LABEL: test5
; CHECK: addps
; CHECK-NEXT: ret

View File

@ -0,0 +1,155 @@
; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
define double @test1(double %A) {
%1 = bitcast double %A to <2 x i32>
%add = add <2 x i32> %1, <i32 3, i32 5>
%2 = bitcast <2 x i32> %add to double
ret double %2
}
; FIXME: Ideally we should be able to fold the entire body of @test1 into a
; single paddd instruction. At the moment we produce the sequence
; pshufd+paddq+pshufd.
; CHECK-LABEL: test1
; CHECK-NOT: movsd
; CHECK: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK-NEXT: ret
define double @test2(double %A, double %B) {
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%add = add <2 x i32> %1, %2
%3 = bitcast <2 x i32> %add to double
ret double %3
}
; FIXME: Ideally we should be able to fold the entire body of @test2 into a
; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
; sequence pshufd+pshufd+paddq+pshufd.
; CHECK-LABEL: test2
; CHECK-NOT: movsd
; CHECK: pshufd
; CHECK-NEXT: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK-NEXT: ret
define i64 @test3(i64 %A) {
%1 = bitcast i64 %A to <2 x float>
%add = fadd <2 x float> %1, <float 3.0, float 5.0>
%2 = bitcast <2 x float> %add to i64
ret i64 %2
}
; CHECK-LABEL: test3
; CHECK-NOT: pshufd
; CHECK: addps
; CHECK-NOT: pshufd
; CHECK: ret
define i64 @test4(i64 %A) {
%1 = bitcast i64 %A to <2 x i32>
%add = add <2 x i32> %1, <i32 3, i32 5>
%2 = bitcast <2 x i32> %add to i64
ret i64 %2
}
; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
; Ideally, we should fold that sequence into a single paddd.
; CHECK-LABEL: test4
; CHECK: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK: ret
define double @test5(double %A) {
%1 = bitcast double %A to <2 x float>
%add = fadd <2 x float> %1, <float 3.0, float 5.0>
%2 = bitcast <2 x float> %add to double
ret double %2
}
; CHECK-LABEL: test5
; CHECK: addps
; CHECK-NEXT: ret
define double @test6(double %A) {
%1 = bitcast double %A to <4 x i16>
%add = add <4 x i16> %1, <i16 3, i16 4, i16 5, i16 6>
%2 = bitcast <4 x i16> %add to double
ret double %2
}
; FIXME: Ideally we should be able to fold the entire body of @test6 into a
; single paddw instruction. At the moment we produce the sequence
; punpcklwd+paddd+pshufb.
; CHECK-LABEL: test6
; CHECK-NOT: movsd
; CHECK: punpcklwd
; CHECK-NEXT: paddd
; CHECK-NEXT: pshufb
; CHECK-NEXT: ret
define double @test7(double %A, double %B) {
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%add = add <4 x i16> %1, %2
%3 = bitcast <4 x i16> %add to double
ret double %3
}
; FIXME: Ideally we should be able to fold the entire body of @test7 into a
; single 'paddw %xmm1, %xmm0' instruction. At the moment we produce the
; sequence punpcklwd+punpcklwd+paddd+pshufb.
; CHECK-LABEL: test7
; CHECK-NOT: movsd
; CHECK: punpcklwd
; CHECK-NEXT: punpcklwd
; CHECK-NEXT: paddd
; CHECK-NEXT: pshufb
; CHECK-NEXT: ret
define double @test8(double %A) {
%1 = bitcast double %A to <8 x i8>
%add = add <8 x i8> %1, <i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10>
%2 = bitcast <8 x i8> %add to double
ret double %2
}
; FIXME: Ideally we should be able to fold the entire body of @test8 into a
; single paddb instruction. At the moment we produce the sequence
; punpcklbw+paddw+pshufb.
; CHECK-LABEL: test8
; CHECK-NOT: movsd
; CHECK: punpcklbw
; CHECK-NEXT: paddw
; CHECK-NEXT: pshufb
; CHECK-NEXT: ret
define double @test9(double %A, double %B) {
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%add = add <8 x i8> %1, %2
%3 = bitcast <8 x i8> %add to double
ret double %3
}
; FIXME: Ideally we should be able to fold the entire body of @test9 into a
; single 'paddb %xmm1, %xmm0' instruction. At the moment we produce the
; sequence punpcklbw+punpcklbw+paddw+pshufb.
; CHECK-LABEL: test9
; CHECK-NOT: movsd
; CHECK: punpcklbw
; CHECK-NEXT: punpcklbw
; CHECK-NEXT: paddw
; CHECK-NEXT: pshufb
; CHECK-NEXT: ret

View File

@ -33,8 +33,8 @@ define <2 x i32> @t3() nounwind {
define double @t4() nounwind { define double @t4() nounwind {
ret double bitcast (<2 x i32> <i32 1, i32 0> to double) ret double bitcast (<2 x i32> <i32 1, i32 0> to double)
; CHECK-LABEL: t4: ; CHECK-LABEL: t4:
; CHECK-NOT: movl $1 ; CHECK: movl $1
; CHECK-NOT: pshufd ; CHECK-NOT: pshufd
; CHECK: movsd {{.*}}, %xmm0 ; CHECK: movd {{.*}}, %xmm0
} }