[x86] Refresh the checks of a number of tests using update_llc_test_checks.py.

The exact format of the checks has changed over time. This includes
different indenting rules, new shuffle comments that have been added,
and more operand hiding behind regular expressions.

No functional changes to the tests are expected here, but this will make
subsequent patches have a clean diff as they change shuffle lowering.

llvm-svn: 228097
This commit is contained in:
Chandler Carruth 2015-02-04 00:58:42 +00:00
parent abde67eb1c
commit abd09a1f35
5 changed files with 310 additions and 239 deletions

View File

@ -75,7 +75,7 @@ define <4 x i32> @test5(i8** %ptr) nounwind {
; CHECK: ## BB#0: ; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl (%eax), %eax ; CHECK-NEXT: movl (%eax), %eax
; CHECK-NEXT: movss (%eax), %xmm1 ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: pxor %xmm0, %xmm0 ; CHECK-NEXT: pxor %xmm0, %xmm0
; CHECK-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; CHECK-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@ -180,7 +180,7 @@ define void @test12() nounwind {
; CHECK: ## BB#0: ; CHECK: ## BB#0:
; CHECK-NEXT: movapd 0, %xmm0 ; CHECK-NEXT: movapd 0, %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] ; CHECK-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
; CHECK-NEXT: movsd %xmm0, %xmm1 ; CHECK-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; CHECK-NEXT: xorpd %xmm2, %xmm2 ; CHECK-NEXT: xorpd %xmm2, %xmm2
; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] ; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
; CHECK-NEXT: addps %xmm1, %xmm0 ; CHECK-NEXT: addps %xmm1, %xmm0
@ -293,7 +293,7 @@ entry:
define <2 x i64> @test_insert_64_zext(<2 x i64> %i) { define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
; CHECK-LABEL: test_insert_64_zext: ; CHECK-LABEL: test_insert_64_zext:
; CHECK: ## BB#0: ; CHECK: ## BB#0:
; CHECK-NEXT: movq %xmm0, %xmm0 ; CHECK-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: retl ; CHECK-NEXT: retl
%1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2> %1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %1 ret <2 x i64> %1
@ -303,7 +303,7 @@ define <4 x i32> @PR19721(<4 x i32> %i) {
; CHECK-LABEL: PR19721: ; CHECK-LABEL: PR19721:
; CHECK: ## BB#0: ; CHECK: ## BB#0:
; CHECK-NEXT: xorps %xmm1, %xmm1 ; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: movss %xmm1, %xmm0 ; CHECK-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; CHECK-NEXT: retl ; CHECK-NEXT: retl
%bc = bitcast <4 x i32> %i to i128 %bc = bitcast <4 x i32> %i to i128
%insert = and i128 %bc, -4294967296 %insert = and i128 %bc, -4294967296

View File

@ -78,13 +78,13 @@ define <2 x i64> @pmovzxbq_1() nounwind {
; X32-LABEL: pmovzxbq_1: ; X32-LABEL: pmovzxbq_1:
; X32: ## BB#0: ## %entry ; X32: ## BB#0: ## %entry
; X32-NEXT: movl L_g16$non_lazy_ptr, %eax ; X32-NEXT: movl L_g16$non_lazy_ptr, %eax
; X32-NEXT: pmovzxbq (%eax), %xmm0 ; X32-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: pmovzxbq_1: ; X64-LABEL: pmovzxbq_1:
; X64: ## BB#0: ## %entry ; X64: ## BB#0: ## %entry
; X64-NEXT: movq _g16@{{.*}}(%rip), %rax ; X64-NEXT: movq _g16@{{.*}}(%rip), %rax
; X64-NEXT: pmovzxbq (%rax), %xmm0 ; X64-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq ; X64-NEXT: retq
entry: entry:
%0 = load i16* @g16, align 2 ; <i16> [#uses=1] %0 = load i16* @g16, align 2 ; <i16> [#uses=1]
@ -202,7 +202,7 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) noun
define <4 x float> @insertps_2(<4 x float> %t1, float %t2) nounwind { define <4 x float> @insertps_2(<4 x float> %t1, float %t2) nounwind {
; X32-LABEL: insertps_2: ; X32-LABEL: insertps_2:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: insertps $0, {{[0-9]+}}(%esp), %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_2: ; X64-LABEL: insertps_2:
@ -322,12 +322,12 @@ define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* n
; X32-LABEL: insertps_from_shufflevector_1: ; X32-LABEL: insertps_from_shufflevector_1:
; X32: ## BB#0: ## %entry ; X32: ## BB#0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps $48, (%eax), %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_from_shufflevector_1: ; X64-LABEL: insertps_from_shufflevector_1:
; X64: ## BB#0: ## %entry ; X64: ## BB#0: ## %entry
; X64-NEXT: insertps $48, (%rdi), %xmm0 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq ; X64-NEXT: retq
entry: entry:
%0 = load <4 x float>* %pb, align 16 %0 = load <4 x float>* %pb, align 16
@ -356,12 +356,12 @@ define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocaptu
; X32-LABEL: pinsrd_from_shufflevector_i32: ; X32-LABEL: pinsrd_from_shufflevector_i32:
; X32: ## BB#0: ## %entry ; X32: ## BB#0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps $48, (%eax), %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: pinsrd_from_shufflevector_i32: ; X64-LABEL: pinsrd_from_shufflevector_i32:
; X64: ## BB#0: ## %entry ; X64: ## BB#0: ## %entry
; X64-NEXT: insertps $48, (%rdi), %xmm0 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq ; X64-NEXT: retq
entry: entry:
%0 = load <4 x i32>* %pb, align 16 %0 = load <4 x i32>* %pb, align 16
@ -388,12 +388,12 @@ define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b)
; X32-LABEL: insertps_from_load_ins_elt_undef: ; X32-LABEL: insertps_from_load_ins_elt_undef:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps $16, (%eax), %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_from_load_ins_elt_undef: ; X64-LABEL: insertps_from_load_ins_elt_undef:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: insertps $16, (%rdi), %xmm0 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X64-NEXT: retq ; X64-NEXT: retq
%1 = load float* %b, align 4 %1 = load float* %b, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0 %2 = insertelement <4 x float> undef, float %1, i32 0
@ -406,13 +406,13 @@ define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
; X32-LABEL: insertps_from_load_ins_elt_undef_i32: ; X32-LABEL: insertps_from_load_ins_elt_undef_i32:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd (%eax), %xmm1 ; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3] ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_from_load_ins_elt_undef_i32: ; X64-LABEL: insertps_from_load_ins_elt_undef_i32:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: movd (%rdi), %xmm1 ; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3] ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; X64-NEXT: retq ; X64-NEXT: retq
%1 = load i32* %b, align 4 %1 = load i32* %b, align 4
@ -447,12 +447,12 @@ define <4 x float> @shuf_XYZ0(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) { define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_XY00: ; X32-LABEL: shuf_XY00:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movq %xmm0, %xmm0 ; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: shuf_XY00: ; X64-LABEL: shuf_XY00:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: movq %xmm0, %xmm0 ; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: retq ; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0 %vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
@ -617,12 +617,12 @@ define <4 x i32> @i32_shuf_XYZ0(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) { define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_XY00: ; X32-LABEL: i32_shuf_XY00:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movq %xmm0, %xmm0 ; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: i32_shuf_XY00: ; X64-LABEL: i32_shuf_XY00:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: movq %xmm0, %xmm0 ; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: retq ; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0 %vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@ -739,7 +739,7 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) { define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X0YC: ; X32-LABEL: i32_shuf_X0YC:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: pmovzxdq %xmm0, %xmm2 ; X32-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero ; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2] ; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
; X32-NEXT: movaps %xmm2, %xmm0 ; X32-NEXT: movaps %xmm2, %xmm0
@ -747,7 +747,7 @@ define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
; ;
; X64-LABEL: i32_shuf_X0YC: ; X64-LABEL: i32_shuf_X0YC:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: pmovzxdq %xmm0, %xmm2 ; X64-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero ; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2] ; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
; X64-NEXT: movaps %xmm2, %xmm0 ; X64-NEXT: movaps %xmm2, %xmm0
@ -812,12 +812,12 @@ define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocap
; X32-LABEL: insertps_from_vector_load: ; X32-LABEL: insertps_from_vector_load:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps $48, (%eax), %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_from_vector_load: ; X64-LABEL: insertps_from_vector_load:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: insertps $48, (%rdi), %xmm0 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq ; X64-NEXT: retq
%1 = load <4 x float>* %pb, align 16 %1 = load <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48) %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
@ -830,12 +830,12 @@ define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>
; X32-LABEL: insertps_from_vector_load_offset: ; X32-LABEL: insertps_from_vector_load_offset:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps $96, 4(%eax), %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[1],xmm0[3]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_from_vector_load_offset: ; X64-LABEL: insertps_from_vector_load_offset:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: insertps $96, 4(%rdi), %xmm0 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[1],xmm0[3]
; X64-NEXT: retq ; X64-NEXT: retq
%1 = load <4 x float>* %pb, align 16 %1 = load <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96) %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
@ -849,13 +849,13 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: shll $4, %ecx ; X32-NEXT: shll $4, %ecx
; X32-NEXT: insertps $192, 12(%eax,%ecx), %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_from_vector_load_offset_2: ; X64-LABEL: insertps_from_vector_load_offset_2:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: shlq $4, %rsi ; X64-NEXT: shlq $4, %rsi
; X64-NEXT: insertps $192, 12(%rdi,%rsi), %xmm0 ; X64-NEXT: insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
; X64-NEXT: retq ; X64-NEXT: retq
%1 = getelementptr inbounds <4 x float>* %pb, i64 %index %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
%2 = load <4 x float>* %1, align 16 %2 = load <4 x float>* %1, align 16
@ -868,14 +868,14 @@ define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocap
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movss (%ecx,%eax,4), %xmm1 ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0] ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0] ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_from_broadcast_loadf32: ; X64-LABEL: insertps_from_broadcast_loadf32:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: movss (%rdi,%rsi,4), %xmm1 ; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0] ; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0] ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; X64-NEXT: retq ; X64-NEXT: retq
@ -920,7 +920,7 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movss (%ecx,%eax,4), %xmm4 ; X32-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0,0,0] ; X32-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0,0,0]
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0] ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0] ; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
@ -933,7 +933,7 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; ;
; X64-LABEL: insertps_from_broadcast_multiple_use: ; X64-LABEL: insertps_from_broadcast_multiple_use:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: movss (%rdi,%rsi,4), %xmm4 ; X64-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X64-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0,0,0] ; X64-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0,0,0]
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0] ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0] ; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
@ -963,14 +963,14 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
; X32-LABEL: insertps_with_undefs: ; X32-LABEL: insertps_with_undefs:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss (%eax), %xmm1 ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],zero,xmm0[0],xmm1[3] ; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],zero,xmm0[0],xmm1[3]
; X32-NEXT: movaps %xmm1, %xmm0 ; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_with_undefs: ; X64-LABEL: insertps_with_undefs:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: movss (%rdi), %xmm1 ; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],zero,xmm0[0],xmm1[3] ; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],zero,xmm0[0],xmm1[3]
; X64-NEXT: movaps %xmm1, %xmm0 ; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq ; X64-NEXT: retq
@ -986,12 +986,12 @@ define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
; X32-LABEL: pr20087: ; X32-LABEL: pr20087:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: insertps $178, 8(%eax), %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[2]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: pr20087: ; X64-LABEL: pr20087:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: insertps $178, 8(%rdi), %xmm0 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[2]
; X64-NEXT: retq ; X64-NEXT: retq
%load = load <4 x float> *%ptr %load = load <4 x float> *%ptr
%ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2> %ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
@ -1004,14 +1004,14 @@ define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3] ; X32-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3]
; X32-NEXT: insertps $220, LCPI49_1+12, %xmm0 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[3],zero,zero
; X32-NEXT: movups %xmm0, (%eax) ; X32-NEXT: movups %xmm0, (%eax)
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_pr20411: ; X64-LABEL: insertps_pr20411:
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3] ; X64-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3]
; X64-NEXT: insertps $220, LCPI49_1+{{.*}}(%rip), %xmm0 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[3],zero,zero
; X64-NEXT: movups %xmm0, (%rdi) ; X64-NEXT: movups %xmm0, (%rdi)
; X64-NEXT: retq ; X64-NEXT: retq
%gather_load = shufflevector <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> %gather_load = shufflevector <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@ -1025,12 +1025,12 @@ define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) { define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_4: ; X32-LABEL: insertps_4:
; X32: ## BB#0: ; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_4: ; X64-LABEL: insertps_4:
; X64: ## BB#0: ; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; X64-NEXT: retq ; X64-NEXT: retq
entry: entry:
@ -1045,12 +1045,12 @@ entry:
define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) { define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_5: ; X32-LABEL: insertps_5:
; X32: ## BB#0: ; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_5: ; X64-LABEL: insertps_5:
; X64: ## BB#0: ; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; X64-NEXT: retq ; X64-NEXT: retq
entry: entry:
@ -1065,12 +1065,12 @@ entry:
define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) { define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_6: ; X32-LABEL: insertps_6:
; X32: ## BB#0: ; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero ; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_6: ; X64-LABEL: insertps_6:
; X64: ## BB#0: ; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero ; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
; X64-NEXT: retq ; X64-NEXT: retq
entry: entry:
@ -1084,12 +1084,12 @@ entry:
define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) { define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_7: ; X32-LABEL: insertps_7:
; X32: ## BB#0: ; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_7: ; X64-LABEL: insertps_7:
; X64: ## BB#0: ; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
; X64-NEXT: retq ; X64-NEXT: retq
entry: entry:
@ -1104,12 +1104,12 @@ entry:
define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) { define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_8: ; X32-LABEL: insertps_8:
; X32: ## BB#0: ; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_8: ; X64-LABEL: insertps_8:
; X64: ## BB#0: ; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; X64-NEXT: retq ; X64-NEXT: retq
entry: entry:
@ -1124,13 +1124,13 @@ entry:
define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) { define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_9: ; X32-LABEL: insertps_9:
; X32: ## BB#0: ; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero ; X32-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
; X32-NEXT: movaps %xmm1, %xmm0 ; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: insertps_9: ; X64-LABEL: insertps_9:
; X64: ## BB#0: ; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero ; X64-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
; X64-NEXT: movaps %xmm1, %xmm0 ; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq ; X64-NEXT: retq
@ -1144,7 +1144,6 @@ entry:
} }
define <4 x float> @insertps_10(<4 x float> %A) define <4 x float> @insertps_10(<4 x float> %A)
{
; X32-LABEL: insertps_10: ; X32-LABEL: insertps_10:
; X32: ## BB#0: ; X32: ## BB#0:
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
@ -1154,6 +1153,7 @@ define <4 x float> @insertps_10(<4 x float> %A)
; X64: ## BB#0: ; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
; X64-NEXT: retq ; X64-NEXT: retq
{
%vecext = extractelement <4 x float> %A, i32 0 %vecext = extractelement <4 x float> %A, i32 0
%vecbuild1 = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %vecext, i32 0 %vecbuild1 = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %vecext, i32 0
%vecbuild2 = insertelement <4 x float> %vecbuild1, float %vecext, i32 2 %vecbuild2 = insertelement <4 x float> %vecbuild1, float %vecext, i32 2
@ -1162,13 +1162,13 @@ define <4 x float> @insertps_10(<4 x float> %A)
define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) { define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) {
; X32-LABEL: build_vector_to_shuffle_1: ; X32-LABEL: build_vector_to_shuffle_1:
; X32: ## BB#0: ; X32: ## BB#0: ## %entry
; X32-NEXT: xorps %xmm1, %xmm1 ; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3] ; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: build_vector_to_shuffle_1: ; X64-LABEL: build_vector_to_shuffle_1:
; X64: ## BB#0: ; X64: ## BB#0: ## %entry
; X64-NEXT: xorps %xmm1, %xmm1 ; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3] ; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X64-NEXT: retq ; X64-NEXT: retq
@ -1182,13 +1182,13 @@ entry:
define <4 x float> @build_vector_to_shuffle_2(<4 x float> %A) { define <4 x float> @build_vector_to_shuffle_2(<4 x float> %A) {
; X32-LABEL: build_vector_to_shuffle_2: ; X32-LABEL: build_vector_to_shuffle_2:
; X32: ## BB#0: ; X32: ## BB#0: ## %entry
; X32-NEXT: xorps %xmm1, %xmm1 ; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3] ; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; X32-NEXT: retl ; X32-NEXT: retl
; ;
; X64-LABEL: build_vector_to_shuffle_2: ; X64-LABEL: build_vector_to_shuffle_2:
; X64: ## BB#0: ; X64: ## BB#0: ## %entry
; X64-NEXT: xorps %xmm1, %xmm1 ; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3] ; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; X64-NEXT: retq ; X64-NEXT: retq

View File

@ -1386,7 +1386,7 @@ define <4 x i32> @shuffle_v4i32_z0zX(<4 x i32> %a) {
; ;
; AVX-LABEL: shuffle_v4i32_z0zX: ; AVX-LABEL: shuffle_v4i32_z0zX:
; AVX: # BB#0: ; AVX: # BB#0:
; AVX-NEXT: vpsllq $32, %xmm0 ; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX-NEXT: retq ; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 0, i32 4, i32 undef> %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 0, i32 4, i32 undef>
ret <4 x i32> %shuffle ret <4 x i32> %shuffle
@ -1400,7 +1400,7 @@ define <4 x i32> @shuffle_v4i32_1z3z(<4 x i32> %a) {
; ;
; AVX-LABEL: shuffle_v4i32_1z3z: ; AVX-LABEL: shuffle_v4i32_1z3z:
; AVX: # BB#0: ; AVX: # BB#0:
; AVX-NEXT: vpsrlq $32, %xmm0 ; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
; AVX-NEXT: retq ; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 4, i32 3, i32 4> %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
ret <4 x i32> %shuffle ret <4 x i32> %shuffle

View File

@ -554,24 +554,24 @@ define <4 x i32> @combine_bitwise_ops_test2c(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test3c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) { define <4 x i32> @combine_bitwise_ops_test3c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test3c: ; SSE2-LABEL: combine_bitwise_ops_test3c:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm0 ; SSE2-NEXT: xorps %xmm1, %xmm0
; SSE2-NEXT: xorps %xmm1, %xmm1 ; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3] ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_bitwise_ops_test3c: ; SSSE3-LABEL: combine_bitwise_ops_test3c:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm0 ; SSSE3-NEXT: xorps %xmm1, %xmm0
; SSSE3-NEXT: xorps %xmm1, %xmm1 ; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3] ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_bitwise_ops_test3c: ; SSE41-LABEL: combine_bitwise_ops_test3c:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: xorps %xmm1, %xmm0 ; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX-LABEL: combine_bitwise_ops_test3c: ; AVX-LABEL: combine_bitwise_ops_test3c:
; AVX: # BB#0: ; AVX: # BB#0:
@ -1148,13 +1148,13 @@ define <4 x float> @combine_test1(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test2(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_test2(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test2: ; SSE2-LABEL: combine_test2:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movss %xmm0, %xmm1 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_test2: ; SSSE3-LABEL: combine_test2:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movss %xmm0, %xmm1 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0 ; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
@ -1256,13 +1256,13 @@ define <4 x i32> @combine_test6(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test7(<4 x i32> %a, <4 x i32> %b) { define <4 x i32> @combine_test7(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test7: ; SSE2-LABEL: combine_test7:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movss %xmm0, %xmm1 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_test7: ; SSSE3-LABEL: combine_test7:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movss %xmm0, %xmm1 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0 ; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
@ -1368,13 +1368,13 @@ define <4 x float> @combine_test11(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test12(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_test12(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test12: ; SSE2-LABEL: combine_test12:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movss %xmm0, %xmm1 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_test12: ; SSSE3-LABEL: combine_test12:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movss %xmm0, %xmm1 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0 ; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
@ -1467,13 +1467,13 @@ define <4 x i32> @combine_test16(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test17(<4 x i32> %a, <4 x i32> %b) { define <4 x i32> @combine_test17(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test17: ; SSE2-LABEL: combine_test17:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movss %xmm0, %xmm1 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_test17: ; SSSE3-LABEL: combine_test17:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movss %xmm0, %xmm1 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0 ; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
@ -1568,26 +1568,26 @@ define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
; SSE-LABEL: combine_test21: ; SSE-LABEL: combine_test21:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2 ; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: movdqa %xmm2, ; SSE-NEXT: movdqa %xmm2, (%rdi)
; SSE-NEXT: retq ; SSE-NEXT: retq
; ;
; AVX1-LABEL: combine_test21: ; AVX1-LABEL: combine_test21:
; AVX1: # BB#0: ; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm0[0],xmm1[0] ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; AVX1-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX1-NEXT: movdqa %xmm2, ; AVX1-NEXT: vmovdqa %xmm2, (%rdi)
; AVX1-NEXT: vzeroupper ; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq ; AVX1-NEXT: retq
; ;
; AVX2-LABEL: combine_test21: ; AVX2-LABEL: combine_test21:
; AVX2: # BB#0: ; AVX2: # BB#0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm0[0],xmm1[0] ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX2-NEXT: movdqa %xmm2, ; AVX2-NEXT: vmovdqa %xmm2, (%rdi)
; AVX2-NEXT: vzeroupper ; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq ; AVX2-NEXT: retq
%1 = shufflevector <8 x i32> %a, <8 x i32> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5> %1 = shufflevector <8 x i32> %a, <8 x i32> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@ -1599,16 +1599,15 @@ define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) { define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
; SSE-LABEL: combine_test22: ; SSE-LABEL: combine_test22:
; SSE: # BB#0: ; SSE: # BB#0:
; SSE-NEXT: movq (%rdi), %xmm0 ; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movhpd (%rsi), %xmm0 ; SSE-NEXT: movhpd (%rsi), %xmm0
; SSE-NEXT: retq ; SSE-NEXT: retq
; ;
; AVX1-LABEL: combine_test22: ; AVX-LABEL: combine_test22:
; AVX1: # BB#0: ; AVX: # BB#0:
; AVX1-NEXT: vmovq (%rdi), %xmm0 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovhpd (%rsi), %xmm0, %xmm0 ; AVX-NEXT: vmovhpd (%rsi), %xmm0, %xmm0
; AVX1-NEXT: retq ; AVX-NEXT: retq
;
; Current AVX2 lowering of this is still awful, not adding a test case. ; Current AVX2 lowering of this is still awful, not adding a test case.
%1 = load <2 x float>* %a, align 8 %1 = load <2 x float>* %a, align 8
%2 = load <2 x float>* %b, align 8 %2 = load <2 x float>* %b, align 8
@ -1641,23 +1640,23 @@ define <4 x float> @combine_test2b(<4 x float> %a, <4 x float> %b) {
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0,0] ; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0,0]
; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_test2b: ; SSSE3-LABEL: combine_test2b:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0] ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_test2b: ; SSE41-LABEL: combine_test2b:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0] ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX-LABEL: combine_test2b: ; AVX-LABEL: combine_test2b:
; AVX: # BB#0: ; AVX: # BB#0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0] ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX-NEXT: retq ; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3> %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
%2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 0, i32 5> %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 0, i32 5>
ret <4 x float> %2 ret <4 x float> %2
} }
@ -1705,44 +1704,44 @@ define <4 x float> @combine_test4b(<4 x float> %a, <4 x float> %b) {
define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) { define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test1c: ; SSE2-LABEL: combine_test1c:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movd (%rdi), %xmm1 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movd (%rsi), %xmm0 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movss %xmm1, %xmm0 ; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_test1c: ; SSSE3-LABEL: combine_test1c:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movd (%rdi), %xmm1 ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movd (%rsi), %xmm0 ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: movss %xmm1, %xmm0 ; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_test1c: ; SSE41-LABEL: combine_test1c:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: pmovzxbd (%rdi), %xmm1 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd (%rsi), %xmm0 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7] ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX1-LABEL: combine_test1c: ; AVX1-LABEL: combine_test1c:
; AVX1: # BB#0: ; AVX1: # BB#0:
; AVX1-NEXT: vpmovzxbd (%rdi), %xmm0 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd (%rsi), %xmm1 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: retq ; AVX1-NEXT: retq
; ;
; AVX2-LABEL: combine_test1c: ; AVX2-LABEL: combine_test1c:
; AVX2: # BB#0: ; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxbd (%rdi), %xmm0 ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpmovzxbd (%rsi), %xmm1 ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: retq ; AVX2-NEXT: retq
%A = load <4 x i8>* %a %A = load <4 x i8>* %a
@ -1755,10 +1754,10 @@ define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) { define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test2c: ; SSE2-LABEL: combine_test2c:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movd (%rdi), %xmm0 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movd (%rsi), %xmm1 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@ -1766,10 +1765,10 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
; ;
; SSSE3-LABEL: combine_test2c: ; SSSE3-LABEL: combine_test2c:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movd (%rdi), %xmm0 ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: movd (%rsi), %xmm1 ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@ -1777,15 +1776,15 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
; ;
; SSE41-LABEL: combine_test2c: ; SSE41-LABEL: combine_test2c:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: pmovzxbd (%rdi), %xmm0 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd (%rsi), %xmm1 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX-LABEL: combine_test2c: ; AVX-LABEL: combine_test2c:
; AVX: # BB#0: ; AVX: # BB#0:
; AVX-NEXT: vpmovzxbd (%rdi), %xmm0 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpmovzxbd (%rsi), %xmm1 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq ; AVX-NEXT: retq
%A = load <4 x i8>* %a %A = load <4 x i8>* %a
@ -1798,10 +1797,10 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) { define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test3c: ; SSE2-LABEL: combine_test3c:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movd (%rdi), %xmm1 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movd (%rsi), %xmm0 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@ -1809,10 +1808,10 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
; ;
; SSSE3-LABEL: combine_test3c: ; SSSE3-LABEL: combine_test3c:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movd (%rdi), %xmm1 ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movd (%rsi), %xmm0 ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@ -1820,15 +1819,15 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
; ;
; SSE41-LABEL: combine_test3c: ; SSE41-LABEL: combine_test3c:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: pmovzxbd (%rdi), %xmm1 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd (%rsi), %xmm0 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSE41-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX-LABEL: combine_test3c: ; AVX-LABEL: combine_test3c:
; AVX: # BB#0: ; AVX: # BB#0:
; AVX-NEXT: vpmovzxbd (%rdi), %xmm0 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpmovzxbd (%rsi), %xmm1 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm1[1],xmm0[1] ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq ; AVX-NEXT: retq
%A = load <4 x i8>* %a %A = load <4 x i8>* %a
@ -1841,10 +1840,10 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) { define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test4c: ; SSE2-LABEL: combine_test4c:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movd (%rdi), %xmm1 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movd (%rsi), %xmm2 ; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm2, %xmm0 ; SSE2-NEXT: movdqa %xmm2, %xmm0
@ -1856,10 +1855,10 @@ define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
; ;
; SSSE3-LABEL: combine_test4c: ; SSSE3-LABEL: combine_test4c:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movd (%rdi), %xmm1 ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movd (%rsi), %xmm2 ; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT: movdqa %xmm2, %xmm0 ; SSSE3-NEXT: movdqa %xmm2, %xmm0
@ -1871,22 +1870,22 @@ define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
; ;
; SSE41-LABEL: combine_test4c: ; SSE41-LABEL: combine_test4c:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: pmovzxbd (%rdi), %xmm1 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd (%rsi), %xmm0 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7] ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX1-LABEL: combine_test4c: ; AVX1-LABEL: combine_test4c:
; AVX1: # BB#0: ; AVX1: # BB#0:
; AVX1-NEXT: vpmovzxbd (%rdi), %xmm0 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd (%rsi), %xmm1 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq ; AVX1-NEXT: retq
; ;
; AVX2-LABEL: combine_test4c: ; AVX2-LABEL: combine_test4c:
; AVX2: # BB#0: ; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxbd (%rdi), %xmm0 ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpmovzxbd (%rsi), %xmm1 ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3] ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX2-NEXT: retq ; AVX2-NEXT: retq
%A = load <4 x i8>* %a %A = load <4 x i8>* %a
@ -1929,12 +1928,12 @@ define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_blend_01: ; SSE2-LABEL: combine_blend_01:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movsd %xmm1, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_blend_01: ; SSSE3-LABEL: combine_blend_01:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movsd %xmm1, %xmm0 ; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_blend_01: ; SSE41-LABEL: combine_blend_01:
@ -1954,14 +1953,14 @@ define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_blend_02: ; SSE2-LABEL: combine_blend_02:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movss %xmm1, %xmm0 ; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0] ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2] ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_blend_02: ; SSSE3-LABEL: combine_blend_02:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movss %xmm1, %xmm0 ; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0] ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2] ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
@ -1983,13 +1982,13 @@ define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_blend_123(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_blend_123(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_blend_123: ; SSE2-LABEL: combine_blend_123:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movss %xmm0, %xmm1 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_blend_123: ; SSSE3-LABEL: combine_blend_123:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movss %xmm0, %xmm1 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0 ; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
@ -2063,12 +2062,12 @@ define <4 x i32> @combine_test_movhl_3(<4 x i32> %a, <4 x i32> %b) {
define <4 x float> @combine_undef_input_test1(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_undef_input_test1(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test1: ; SSE2-LABEL: combine_undef_input_test1:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movsd %xmm1, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_undef_input_test1: ; SSSE3-LABEL: combine_undef_input_test1:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movsd %xmm1, %xmm0 ; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_undef_input_test1: ; SSE41-LABEL: combine_undef_input_test1:
@ -2134,13 +2133,13 @@ define <4 x float> @combine_undef_input_test4(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test5(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_undef_input_test5(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test5: ; SSE2-LABEL: combine_undef_input_test5:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movsd %xmm0, %xmm1 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_undef_input_test5: ; SSSE3-LABEL: combine_undef_input_test5:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movsd %xmm0, %xmm1 ; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movaps %xmm1, %xmm0 ; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
@ -2176,23 +2175,23 @@ define <4 x float> @combine_undef_input_test7(<4 x float> %a) {
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0] ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_undef_input_test7: ; SSSE3-LABEL: combine_undef_input_test7:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_undef_input_test7: ; SSE41-LABEL: combine_undef_input_test7:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX-LABEL: combine_undef_input_test7: ; AVX-LABEL: combine_undef_input_test7:
; AVX: # BB#0: ; AVX: # BB#0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq ; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7> %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
%2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 1, i32 2, i32 4, i32 5> %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
ret <4 x float> %2 ret <4 x float> %2
} }
@ -2201,23 +2200,23 @@ define <4 x float> @combine_undef_input_test8(<4 x float> %a) {
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0] ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_undef_input_test8: ; SSSE3-LABEL: combine_undef_input_test8:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_undef_input_test8: ; SSE41-LABEL: combine_undef_input_test8:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX-LABEL: combine_undef_input_test8: ; AVX-LABEL: combine_undef_input_test8:
; AVX: # BB#0: ; AVX: # BB#0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq ; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7> %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
%2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 1> %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
ret <4 x float> %2 ret <4 x float> %2
} }
@ -2248,12 +2247,12 @@ define <4 x float> @combine_undef_input_test10(<4 x float> %a) {
define <4 x float> @combine_undef_input_test11(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_undef_input_test11(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test11: ; SSE2-LABEL: combine_undef_input_test11:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movsd %xmm1, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_undef_input_test11: ; SSSE3-LABEL: combine_undef_input_test11:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movsd %xmm1, %xmm0 ; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_undef_input_test11: ; SSE41-LABEL: combine_undef_input_test11:
@ -2319,13 +2318,13 @@ define <4 x float> @combine_undef_input_test14(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test15(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_undef_input_test15(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test15: ; SSE2-LABEL: combine_undef_input_test15:
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movsd %xmm0, %xmm1 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_undef_input_test15: ; SSSE3-LABEL: combine_undef_input_test15:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movsd %xmm0, %xmm1 ; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movaps %xmm1, %xmm0 ; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
@ -2367,23 +2366,23 @@ define <4 x float> @combine_undef_input_test17(<4 x float> %a) {
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0] ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_undef_input_test17: ; SSSE3-LABEL: combine_undef_input_test17:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_undef_input_test17: ; SSE41-LABEL: combine_undef_input_test17:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX-LABEL: combine_undef_input_test17: ; AVX-LABEL: combine_undef_input_test17:
; AVX: # BB#0: ; AVX: # BB#0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq ; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7> %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
%2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 5, i32 6, i32 0, i32 1> %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 5, i32 6, i32 0, i32 1>
ret <4 x float> %2 ret <4 x float> %2
} }
@ -2392,23 +2391,23 @@ define <4 x float> @combine_undef_input_test18(<4 x float> %a) {
; SSE2: # BB#0: ; SSE2: # BB#0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0] ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq ; SSE2-NEXT: retq
; ;
; SSSE3-LABEL: combine_undef_input_test18: ; SSSE3-LABEL: combine_undef_input_test18:
; SSSE3: # BB#0: ; SSSE3: # BB#0:
; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq ; SSSE3-NEXT: retq
; ;
; SSE41-LABEL: combine_undef_input_test18: ; SSE41-LABEL: combine_undef_input_test18:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq ; SSE41-NEXT: retq
; ;
; AVX-LABEL: combine_undef_input_test18: ; AVX-LABEL: combine_undef_input_test18:
; AVX: # BB#0: ; AVX: # BB#0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq ; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7> %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
%2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 0, i32 5> %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 0, i32 5>
ret <4 x float> %2 ret <4 x float> %2
} }
@ -2497,6 +2496,26 @@ define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
} }
define <4 x float> @combine_insertps1(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_insertps1(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_insertps1:
; SSE2: # BB#0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,2],xmm0[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_insertps1:
; SSSE3: # BB#0:
; SSSE3-NEXT: movaps %xmm0, %xmm2
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,2],xmm0[1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSSE3-NEXT: movaps %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_insertps1: ; SSE41-LABEL: combine_insertps1:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm1[2],xmm0[1,2,3] ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm1[2],xmm0[1,2,3]
@ -2513,6 +2532,20 @@ define <4 x float> @combine_insertps1(<4 x float> %a, <4 x float> %b) {
} }
define <4 x float> @combine_insertps2(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_insertps2(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_insertps2:
; SSE2: # BB#0:
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_insertps2:
; SSSE3: # BB#0:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_insertps2: ; SSE41-LABEL: combine_insertps2:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[2,3] ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[2,3]
@ -2529,6 +2562,26 @@ define <4 x float> @combine_insertps2(<4 x float> %a, <4 x float> %b) {
} }
define <4 x float> @combine_insertps3(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_insertps3(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_insertps3:
; SSE2: # BB#0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,1]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_insertps3:
; SSSE3: # BB#0:
; SSSE3-NEXT: movaps %xmm0, %xmm2
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,1]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSSE3-NEXT: movaps %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_insertps3: ; SSE41-LABEL: combine_insertps3:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3] ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
@ -2545,6 +2598,24 @@ define <4 x float> @combine_insertps3(<4 x float> %a, <4 x float> %b) {
} }
define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) { define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_insertps4:
; SSE2: # BB#0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,1]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm2[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_insertps4:
; SSSE3: # BB#0:
; SSSE3-NEXT: movaps %xmm0, %xmm2
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,1]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm2[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_insertps4: ; SSE41-LABEL: combine_insertps4:
; SSE41: # BB#0: ; SSE41: # BB#0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0] ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]

View File

@ -95,7 +95,7 @@ define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_4zzz: ; SSE1-LABEL: shuffle_v4f32_4zzz:
; SSE1: # BB#0: ; SSE1: # BB#0:
; SSE1-NEXT: xorps %xmm1, %xmm1 ; SSE1-NEXT: xorps %xmm1, %xmm1
; SSE1-NEXT: movss %xmm0, %xmm1 ; SSE1-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE1-NEXT: movaps %xmm1, %xmm0 ; SSE1-NEXT: movaps %xmm1, %xmm0
; SSE1-NEXT: retq ; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3> %shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@ -163,7 +163,7 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
; SSE1-LABEL: insert_reg_and_zero_v4f32: ; SSE1-LABEL: insert_reg_and_zero_v4f32:
; SSE1: # BB#0: ; SSE1: # BB#0:
; SSE1-NEXT: xorps %xmm1, %xmm1 ; SSE1-NEXT: xorps %xmm1, %xmm1
; SSE1-NEXT: movss %xmm0, %xmm1 ; SSE1-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE1-NEXT: movaps %xmm1, %xmm0 ; SSE1-NEXT: movaps %xmm1, %xmm0
; SSE1-NEXT: retq ; SSE1-NEXT: retq
%v = insertelement <4 x float> undef, float %a, i32 0 %v = insertelement <4 x float> undef, float %a, i32 0
@ -174,7 +174,7 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) { define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
; SSE1-LABEL: insert_mem_and_zero_v4f32: ; SSE1-LABEL: insert_mem_and_zero_v4f32:
; SSE1: # BB#0: ; SSE1: # BB#0:
; SSE1-NEXT: movss (%rdi), %xmm0 ; SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE1-NEXT: retq ; SSE1-NEXT: retq
%a = load float* %ptr %a = load float* %ptr
%v = insertelement <4 x float> undef, float %a, i32 0 %v = insertelement <4 x float> undef, float %a, i32 0
@ -189,8 +189,8 @@ define <4 x float> @insert_mem_lo_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp) ; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; SSE1-NEXT: shrq $32, %rax ; SSE1-NEXT: shrq $32, %rax
; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp) ; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; SSE1-NEXT: movss -{{[0-9]+}}(%rsp), %xmm1 ; SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE1-NEXT: movss -{{[0-9]+}}(%rsp), %xmm2 ; SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE1-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE1-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE1-NEXT: xorps %xmm2, %xmm2 ; SSE1-NEXT: xorps %xmm2, %xmm2
; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,1] ; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,1]
@ -210,8 +210,8 @@ define <4 x float> @insert_mem_hi_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp) ; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; SSE1-NEXT: shrq $32, %rax ; SSE1-NEXT: shrq $32, %rax
; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp) ; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; SSE1-NEXT: movss -{{[0-9]+}}(%rsp), %xmm1 ; SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE1-NEXT: movss -{{[0-9]+}}(%rsp), %xmm2 ; SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE1-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE1-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE1-NEXT: xorps %xmm2, %xmm2 ; SSE1-NEXT: xorps %xmm2, %xmm2
; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,1] ; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,1]