[x86] Teach the new vector shuffle lowering the basics about insertion
of a single element into a zero vector for v4f64 and v4i64 in AVX.
Ironically, there is less to see here because xor+blend is so crazy fast
that we can't really beat that to zero the high 128-bit lane.

llvm-svn: 218214
Chandler Carruth 2014-09-21 12:49:46 +00:00
parent 43f5974ea0
commit 33eda72802
2 changed files with 67 additions and 0 deletions
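For context, a minimal sketch (not part of this commit) of the xor+blend idiom the new lowering emits, written with AVX intrinsics; the function name here is invented for illustration. Compiled for AVX this turns into vxorpd + vblendpd, matching the test expectations added below.

#include <immintrin.h>

// Insert a scalar double into element 0 of an otherwise-zero <4 x double>.
__m256d insert_and_zero_v4f64(double a) {
  __m256d v = _mm256_castpd128_pd256(_mm_set_sd(a)); // scalar lives in the low lane
  __m256d zero = _mm256_setzero_pd();                // vxorpd: zero the whole ymm
  return _mm256_blend_pd(zero, v, 0x1);              // vblendpd: keep only element 0 from v
}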

@@ -9243,6 +9243,15 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (isShuffleEquivalent(Mask, 5, 1, 7, 3))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
// If we have a single input to the zero element, insert that into V1 if we
// can do so cheaply.
int NumV2Elements =
std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
if (NumV2Elements == 1 && Mask[0] >= 4)
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
return Insertion;
if (SDValue Blend =
lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask, DAG))
return Blend;
@@ -9306,6 +9315,15 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (is128BitLaneCrossingShuffleMask(MVT::v4i64, Mask))
return splitAndLower256BitVectorShuffle(Op, V1, V2, Subtarget, DAG);
// If we have a single input to the zero element, insert that into V1 if we
// can do so cheaply.
int NumV2Elements =
std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
if (NumV2Elements == 1 && Mask[0] >= 4)
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
MVT::v4i64, DL, V1, V2, Mask, Subtarget, DAG))
return Insertion;
// AVX1 doesn't provide any facilities for v4i64 shuffles, bitcast and
// delegate to floating point code.
V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f64, V1);
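Aside (not part of the diff): a hedged intrinsics sketch of the bitcast-and-delegate idea, with an invented function name. AVX1 has no 256-bit integer blend, but the same vxorpd/vblendpd sequence works on v4i64 data because the blend just selects bits; the casts back and forth cost no instructions.

#include <immintrin.h>
#include <cstdint>

// Insert an i64 into element 0 of an otherwise-zero <4 x i64> via the FP domain.
__m256i insert_i64_and_zero_v4i64(int64_t a) {
  __m128i lo = _mm_cvtsi64_si128(a);                            // vmovq %rdi, %xmm0
  __m256d v = _mm256_castsi256_pd(_mm256_castsi128_si256(lo));  // free bitcast to v4f64
  __m256d zero = _mm256_setzero_pd();                           // vxorpd
  __m256d r = _mm256_blend_pd(zero, v, 0x1);                    // vblendpd: element 0 from v
  return _mm256_castpd_si256(r);                                // free bitcast back to v4i64
}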

@@ -563,3 +563,52 @@ define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
ret <4 x i64> %f
}
define <4 x i64> @insert_reg_and_zero_v4i64(i64 %a) {
; ALL-LABEL: @insert_reg_and_zero_v4i64
; ALL: # BB#0:
; ALL-NEXT: vmovq %rdi, %xmm0
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
; ALL-NEXT: retq
%v = insertelement <4 x i64> undef, i64 %a, i64 0
%shuffle = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x i64> %shuffle
}
define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
; ALL-LABEL: @insert_mem_and_zero_v4i64
; ALL: # BB#0:
; ALL-NEXT: vmovq (%rdi), %xmm0
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
; ALL-NEXT: retq
%a = load i64* %ptr
%v = insertelement <4 x i64> undef, i64 %a, i64 0
%shuffle = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x i64> %shuffle
}
define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
; ALL-LABEL: @insert_reg_and_zero_v4f64
; ALL: # BB#0:
; ALL: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
; ALL-NEXT: retq
%v = insertelement <4 x double> undef, double %a, i32 0
%shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x double> %shuffle
}
define <4 x double> @insert_mem_and_zero_v4f64(double* %ptr) {
; ALL-LABEL: @insert_mem_and_zero_v4f64
; ALL: # BB#0:
; ALL-NEXT: vmovsd (%rdi), %xmm0
; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT: vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3]
; ALL-NEXT: retq
%a = load double* %ptr
%v = insertelement <4 x double> undef, double %a, i32 0
%shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x double> %shuffle
}