[X86][AVX] Test target shuffle combining on 32 and 64-bit targets

llvm-svn: 281833
Simon Pilgrim 2016-09-17 18:42:41 +00:00
parent 331b9ae196
commit 06bfabbfb6
3 changed files with 1190 additions and 512 deletions

test/CodeGen/X86/vector-shuffle-combining-avx.ll

@@ -1,7 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X64
 ;
 ; Combine tests involving AVX target shuffles
@@ -20,92 +23,141 @@ declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8)
 declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, i8)
 
 define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_identity:
-; ALL: # BB#0:
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f32_identity:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_identity:
+; X64: # BB#0:
+; X64-NEXT: retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   ret <4 x float> %2
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movddup(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_movddup:
-; ALL: # BB#0:
-; ALL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f32_movddup:
+; X32: # BB#0:
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_movddup:
+; X64: # BB#0:
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movddup_load(<4 x float> *%a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_movddup_load:
-; ALL: # BB#0:
-; ALL-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f32_movddup_load:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_movddup_load:
+; X64: # BB#0:
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT: retq
   %1 = load <4 x float>, <4 x float> *%a0
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 0, i32 1, i32 0, i32 1>)
   ret <4 x float> %2
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movshdup(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_movshdup:
-; ALL: # BB#0:
-; ALL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f32_movshdup:
+; X32: # BB#0:
+; X32-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_movshdup:
+; X64: # BB#0:
+; X64-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-NEXT: retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 undef, i32 1, i32 3, i32 3>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_movsldup(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_movsldup:
-; ALL: # BB#0:
-; ALL-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f32_movsldup:
+; X32: # BB#0:
+; X32-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_movsldup:
+; X64: # BB#0:
+; X64-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; X64-NEXT: retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 undef>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_unpckh(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_unpckh:
-; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f32_unpckh:
+; X32: # BB#0:
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_unpckh:
+; X64: # BB#0:
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; X64-NEXT: retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 2, i32 2, i32 3, i32 3>)
   ret <4 x float> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_unpckl(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_unpckl:
-; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f32_unpckl:
+; X32: # BB#0:
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_unpckl:
+; X64: # BB#0:
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X64-NEXT: retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 1, i32 1>)
   ret <4 x float> %1
 }
 
 define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_identity:
-; ALL: # BB#0:
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_8f32_identity:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_identity:
+; X64: # BB#0:
+; X64-NEXT: retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 undef>)
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
   ret <8 x float> %2
 }
 
 define <8 x float> @combine_vpermilvar_8f32_10326u4u(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_10326u4u:
-; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_8f32_10326u4u:
+; X32: # BB#0:
+; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_10326u4u:
+; X64: # BB#0:
+; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,6,u,4,u]
+; X64-NEXT: retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 0, i32 1, i32 2, i32 undef>)
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 undef>)
   ret <8 x float> %2
 }
 
 define <8 x float> @combine_vpermilvar_vperm2f128_8f32(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_vperm2f128_8f32:
-; ALL: # BB#0:
-; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_vperm2f128_8f32:
+; X32: # BB#0:
+; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_vperm2f128_8f32:
+; X64: # BB#0:
+; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; X64-NEXT: retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
   %2 = shufflevector <8 x float> %1, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
   %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
@@ -113,10 +165,15 @@ define <8 x float> @combine_vpermilvar_vperm2f128_8f32(<8 x float> %a0) {
 }
 
 define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
-; ALL: # BB#0:
-; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
+; X32: # BB#0:
+; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_vperm2f128_zero_8f32:
+; X64: # BB#0:
+; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
+; X64-NEXT: retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
   %2 = shufflevector <8 x float> %1, <8 x float> zeroinitializer, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
   %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
@@ -124,11 +181,17 @@ define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
 }
 
 define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0) {
-; ALL-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
-; ALL: # BB#0:
-; ALL-NEXT: vxorpd %ymm1, %ymm1, %ymm1
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
+; X32: # BB#0:
+; X32-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
+; X64: # BB#0:
+; X64-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; X64-NEXT: retq
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   %2 = shufflevector <4 x double> %1, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %3 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %2, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
@@ -136,82 +199,130 @@ define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0) {
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_movddup:
-; ALL: # BB#0:
-; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_8f32_movddup:
+; X32: # BB#0:
+; X32-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_movddup:
+; X64: # BB#0:
+; X64-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X64-NEXT: retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
   ret <8 x float> %1
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movddup_load(<8 x float> *%a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_movddup_load:
-; ALL: # BB#0:
-; ALL-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_8f32_movddup_load:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_movddup_load:
+; X64: # BB#0:
+; X64-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
+; X64-NEXT: retq
   %1 = load <8 x float>, <8 x float> *%a0
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>)
   ret <8 x float> %2
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_movshdup:
-; ALL: # BB#0:
-; ALL-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_8f32_movshdup:
+; X32: # BB#0:
+; X32-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_movshdup:
+; X64: # BB#0:
+; X64-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; X64-NEXT: retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 5, i32 7, i32 7>)
   ret <8 x float> %1
 }
 
 define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_movsldup:
-; ALL: # BB#0:
-; ALL-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_8f32_movsldup:
+; X32: # BB#0:
+; X32-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_movsldup:
+; X64: # BB#0:
+; X64-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; X64-NEXT: retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>)
   ret <8 x float> %1
 }
 
 define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_2f64_identity:
-; ALL: # BB#0:
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_2f64_identity:
+; X32: # BB#0:
+; X32-NEXT: movl $2, %eax
+; X32-NEXT: vmovd %eax, %xmm1
+; X32-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
+; X32-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_2f64_identity:
+; X64: # BB#0:
+; X64-NEXT: retq
   %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 2, i64 0>)
   %2 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> <i64 2, i64 0>)
   ret <2 x double> %2
 }
 
 define <2 x double> @combine_vpermilvar_2f64_movddup(<2 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_2f64_movddup:
-; ALL: # BB#0:
-; ALL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_2f64_movddup:
+; X32: # BB#0:
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_2f64_movddup:
+; X64: # BB#0:
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: retq
   %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 0, i64 0>)
   ret <2 x double> %1
 }
 
 define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f64_identity:
-; ALL: # BB#0:
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f64_identity:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f64_identity:
+; X64: # BB#0:
+; X64-NEXT: retq
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   ret <4 x double> %2
 }
 
 define <4 x double> @combine_vpermilvar_4f64_movddup(<4 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f64_movddup:
-; ALL: # BB#0:
-; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f64_movddup:
+; X32: # BB#0:
+; X32-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f64_movddup:
+; X64: # BB#0:
+; X64-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X64-NEXT: retq
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 0, i64 0, i64 4, i64 4>)
   ret <4 x double> %1
 }
 
 define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_4stage:
-; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f32_4stage:
+; X32: # BB#0:
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_4stage:
+; X64: # BB#0:
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
+; X64-NEXT: retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 2, i32 3, i32 0, i32 1>)
   %3 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>)
@@ -220,10 +331,15 @@ define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
 }
 
 define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32_4stage:
-; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_8f32_4stage:
+; X32: # BB#0:
+; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_8f32_4stage:
+; X64: # BB#0:
+; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
+; X64-NEXT: retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
   %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>)
   %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 0, i32 2, i32 1, i32 3>)
@@ -232,48 +348,77 @@ define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
 }
 
 define <4 x float> @combine_vpermilvar_4f32_as_insertps(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32_as_insertps:
-; ALL: # BB#0:
-; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
-; ALL-NEXT: retq
+; X32-LABEL: combine_vpermilvar_4f32_as_insertps:
+; X32: # BB#0:
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermilvar_4f32_as_insertps:
+; X64: # BB#0:
+; X64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,xmm0[2],zero
+; X64-NEXT: retq
   %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
   %2 = shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 1, i32 4>
   ret <4 x float> %2
 }
 
 define <2 x double> @constant_fold_vpermilvar_pd() {
-; ALL-LABEL: constant_fold_vpermilvar_pd:
-; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = mem[1,0]
-; ALL-NEXT: retq
+; X32-LABEL: constant_fold_vpermilvar_pd:
+; X32: # BB#0:
+; X32-NEXT: movl $2, %eax
+; X32-NEXT: vmovd %eax, %xmm0
+; X32-NEXT: vmovapd {{.*#+}} xmm1 = [1.000000e+00,2.000000e+00]
+; X32-NEXT: vpermilpd %xmm0, %xmm1, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: constant_fold_vpermilvar_pd:
+; X64: # BB#0:
+; X64-NEXT: vpermilpd {{.*#+}} xmm0 = mem[1,0]
+; X64-NEXT: retq
   %1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> <double 1.0, double 2.0>, <2 x i64> <i64 2, i64 0>)
   ret <2 x double> %1
 }
 
 define <4 x double> @constant_fold_vpermilvar_pd_256() {
-; ALL-LABEL: constant_fold_vpermilvar_pd_256:
-; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = mem[1,0,2,3]
-; ALL-NEXT: retq
+; X32-LABEL: constant_fold_vpermilvar_pd_256:
+; X32: # BB#0:
+; X32-NEXT: vpermilpd {{.*#+}} ymm0 = mem[1,0,2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: constant_fold_vpermilvar_pd_256:
+; X64: # BB#0:
+; X64-NEXT: vpermilpd {{.*#+}} ymm0 = mem[1,0,2,3]
+; X64-NEXT: retq
   %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>, <4 x i64> <i64 2, i64 0, i64 0, i64 2>)
   ret <4 x double> %1
 }
 
 define <4 x float> @constant_fold_vpermilvar_ps() {
-; ALL-LABEL: constant_fold_vpermilvar_ps:
-; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,0,2,1]
-; ALL-NEXT: retq
+; X32-LABEL: constant_fold_vpermilvar_ps:
+; X32: # BB#0:
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,0,2,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: constant_fold_vpermilvar_ps:
+; X64: # BB#0:
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,0,2,1]
+; X64-NEXT: retq
   %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x i32> <i32 3, i32 0, i32 2, i32 1>)
   ret <4 x float> %1
 }
 
 define <8 x float> @constant_fold_vpermilvar_ps_256() {
-; ALL-LABEL: constant_fold_vpermilvar_ps_256:
-; ALL: # BB#0:
-; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00]
-; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,5,5]
-; ALL-NEXT: retq
+; X32-LABEL: constant_fold_vpermilvar_ps_256:
+; X32: # BB#0:
+; X32-NEXT: vmovaps {{.*#+}} ymm0 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00]
+; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,5,5]
+; X32-NEXT: retl
+;
+; X64-LABEL: constant_fold_vpermilvar_ps_256:
+; X64: # BB#0:
+; X64-NEXT: vmovaps {{.*#+}} ymm0 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00]
+; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,5,5]
+; X64-NEXT: retq
   %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 0, i32 2, i32 1, i32 0, i32 1, i32 1, i32 1>)
   ret <8 x float> %1
 }

test/CodeGen/X86/vector-shuffle-combining-avx2.ll

@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
 
 declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>)
 declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>)
@@ -7,30 +8,45 @@ declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
 declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>)
 
 define <32 x i8> @combine_pshufb_pslldq(<32 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_pslldq:
-; CHECK: # BB#0:
-; CHECK-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_pslldq:
+; X32: # BB#0:
+; X32-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_pslldq:
+; X64: # BB#0:
+; X64-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-NEXT: retq
   %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
   %2 = shufflevector <32 x i8> %1, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
   ret <32 x i8> %2
 }
 
 define <32 x i8> @combine_pshufb_psrldq(<32 x i8> %a0) {
-; CHECK-LABEL: combine_pshufb_psrldq:
-; CHECK: # BB#0:
-; CHECK-NEXT: vxorps %ymm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_psrldq:
+; X32: # BB#0:
+; X32-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_psrldq:
+; X64: # BB#0:
+; X64-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X64-NEXT: retq
   %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
   %2 = shufflevector <32 x i8> %1, <32 x i8> zeroinitializer, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
   ret <32 x i8> %2
 }
 
 define <32 x i8> @combine_pshufb_vpermd(<8 x i32> %a) {
-; CHECK-LABEL: combine_pshufb_vpermd:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_vpermd:
+; X32: # BB#0:
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_vpermd:
+; X64: # BB#0:
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
+; X64-NEXT: retq
   %tmp0 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 4>)
   %tmp1 = bitcast <8 x i32> %tmp0 to <32 x i8>
   %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 30>
@@ -38,10 +54,15 @@ define <32 x i8> @combine_pshufb_vpermd(<8 x i32> %a) {
 }
 
 define <32 x i8> @combine_pshufb_vpermps(<8 x float> %a) {
-; CHECK-LABEL: combine_pshufb_vpermps:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_vpermps:
+; X32: # BB#0:
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_vpermps:
+; X64: # BB#0:
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,16,17,18,18]
+; X64-NEXT: retq
   %tmp0 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 4>)
   %tmp1 = bitcast <8 x float> %tmp0 to <32 x i8>
   %tmp2 = shufflevector <32 x i8> %tmp1, <32 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 30>
@@ -49,11 +70,17 @@ define <32 x i8> @combine_pshufb_vpermps(<8 x float> %a) {
 }
 
 define <4 x i64> @combine_permq_pshufb_as_vperm2i128(<4 x i64> %a0) {
-; CHECK-LABEL: combine_permq_pshufb_as_vperm2i128:
-; CHECK: # BB#0:
-; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
-; CHECK-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_permq_pshufb_as_vperm2i128:
+; X32: # BB#0:
+; X32-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
+; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_permq_pshufb_as_vperm2i128:
+; X64: # BB#0:
+; X64-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],zero,zero
+; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: retq
   %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   %2 = bitcast <4 x i64> %1 to <32 x i8>
   %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255>)
@@ -63,11 +90,17 @@ define <4 x i64> @combine_permq_pshufb_as_vperm2i128(<4 x i64> %a0) {
 }
 
 define <32 x i8> @combine_permq_pshufb_as_vpblendd(<4 x i64> %a0) {
-; CHECK-LABEL: combine_permq_pshufb_as_vpblendd:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; CHECK-NEXT: retq
+; X32-LABEL: combine_permq_pshufb_as_vpblendd:
+; X32: # BB#0:
+; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X32-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_permq_pshufb_as_vpblendd:
+; X64: # BB#0:
+; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; X64-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; X64-NEXT: retq
   %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
   %2 = bitcast <4 x i64> %1 to <32 x i8>
   %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255>)
@@ -75,20 +108,31 @@ define <32 x i8> @combine_permq_pshufb_as_vpblendd(<4 x i64> %a0) {
 }
 
 define <16 x i8> @combine_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastb128:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastb128:
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastb %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastb128:
+; X64: # BB#0:
+; X64-NEXT: vpbroadcastb %xmm0, %xmm0
+; X64-NEXT: retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> zeroinitializer)
   ret <16 x i8> %1
 }
 
 define <32 x i8> @combine_pshufb_as_vpbroadcastb256(<2 x i64> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastb256:
-; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastb256:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vpbroadcastb %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastb256:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vpbroadcastb %xmm0, %ymm0
+; X64-NEXT: retq
   %1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %2 = bitcast <4 x i64> %1 to <32 x i8>
   %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> zeroinitializer)
@@ -99,20 +143,31 @@ define <32 x i8> @combine_pshufb_as_vpbroadcastb256(<2 x i64> %a) {
 }
 
 define <16 x i8> @combine_pshufb_as_vpbroadcastw128(<16 x i8> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastw128:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastw128:
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastw %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastw128:
+; X64: # BB#0:
+; X64-NEXT: vpbroadcastw %xmm0, %xmm0
+; X64-NEXT: retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1>)
   ret <16 x i8> %1
 }
 
 define <32 x i8> @combine_pshufb_as_vpbroadcastw256(<2 x i64> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastw256:
-; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastw256:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vpbroadcastw %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastw256:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vpbroadcastw %xmm0, %ymm0
+; X64-NEXT: retq
   %1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %2 = bitcast <4 x i64> %1 to <32 x i8>
   %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> <i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1>)
@@ -123,23 +178,36 @@ define <32 x i8> @combine_pshufb_as_vpbroadcastw256(<2 x i64> %a) {
 }
 
 define <16 x i8> @combine_pshufb_as_vpbroadcastd128(<16 x i8> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastd128:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpbroadcastd %xmm0, %xmm0
-; CHECK-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastd128:
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastd %xmm0, %xmm0
+; X32-NEXT: vpaddb {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastd128:
+; X64: # BB#0:
+; X64-NEXT: vpbroadcastd %xmm0, %xmm0
+; X64-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>)
   %2 = add <16 x i8> %1, <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>
   ret <16 x i8> %2
 }
 
 define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
-; CHECK-LABEL: combine_permd_as_vpbroadcastd256:
-; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT: vpbroadcastd %xmm0, %ymm0
-; CHECK-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_permd_as_vpbroadcastd256:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vpbroadcastd %xmm0, %ymm0
+; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_permd_as_vpbroadcastd256:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vpbroadcastd %xmm0, %ymm0
+; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %1, <8 x i32> zeroinitializer)
   %3 = add <8 x i32> %2, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -147,21 +215,33 @@ define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
 }
 
 define <16 x i8> @combine_pshufb_as_vpbroadcastq128(<16 x i8> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastq128:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastq128:
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastq %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastq128:
+; X64: # BB#0:
+; X64-NEXT: vpbroadcastq %xmm0, %xmm0
+; X64-NEXT: retq
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
   ret <16 x i8> %1
 }
 
 define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
-; CHECK-LABEL: combine_permd_as_vpbroadcastq256:
-; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0
-; CHECK-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_permd_as_vpbroadcastq256:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vpbroadcastq %xmm0, %ymm0
+; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_permd_as_vpbroadcastq256:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vpbroadcastq %xmm0, %ymm0
+; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %1, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>)
   %3 = add <8 x i32> %2, <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -169,10 +249,15 @@ define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
 }
 
 define <4 x float> @combine_pshufb_as_vpbroadcastss128(<4 x float> %a) {
-; CHECK-LABEL: combine_pshufb_as_vpbroadcastss128:
-; CHECK: # BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_pshufb_as_vpbroadcastss128:
+; X32: # BB#0:
+; X32-NEXT: vbroadcastss %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_vpbroadcastss128:
+; X64: # BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %xmm0
+; X64-NEXT: retq
   %1 = bitcast <4 x float> %a to <16 x i8>
   %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>)
   %3 = bitcast <16 x i8> %2 to <4 x float>
@@ -180,22 +265,34 @@ define <4 x float> @combine_pshufb_as_vpbroadcastss128(<4 x float> %a) {
 }
 
 define <8 x float> @combine_permd_as_vpbroadcastss256(<4 x float> %a) {
-; CHECK-LABEL: combine_permd_as_vpbroadcastss256:
-; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_permd_as_vpbroadcastss256:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vbroadcastss %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_permd_as_vpbroadcastss256:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vbroadcastss %xmm0, %ymm0
+; X64-NEXT: retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %1, <8 x i32> zeroinitializer)
   ret <8 x float> %2
 }
 
 define <4 x double> @combine_permd_as_vpbroadcastsd256(<2 x double> %a) {
-; CHECK-LABEL: combine_permd_as_vpbroadcastsd256:
-; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_permd_as_vpbroadcastsd256:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vbroadcastsd %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_permd_as_vpbroadcastsd256:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vbroadcastsd %xmm0, %ymm0
+; X64-NEXT: retq
   %1 = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
   %2 = bitcast <4 x double> %1 to <8 x float>
   %3 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %2, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>)
@@ -204,30 +301,45 @@ define <4 x double> @combine_permd_as_vpbroadcastsd256(<2 x double> %a) {
 }
 
 define <16 x i8> @combine_vpbroadcast_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
-; CHECK-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb128:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb128:
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastb %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb128:
+; X64: # BB#0:
+; X64-NEXT: vpbroadcastb %xmm0, %xmm0
+; X64-NEXT: retq
   %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
   %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> zeroinitializer)
   ret <16 x i8> %2
 }
 
 define <32 x i8> @combine_vpbroadcast_pshufb_as_vpbroadcastb256(<32 x i8> %a) {
-; CHECK-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb256:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb256:
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastb %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastb256:
+; X64: # BB#0:
+; X64-NEXT: vpbroadcastb %xmm0, %ymm0
+; X64-NEXT: retq
   %1 = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer
   %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> zeroinitializer)
   ret <32 x i8> %2
 }
 
 define <4 x float> @combine_vpbroadcast_pshufb_as_vpbroadcastss128(<4 x float> %a) {
-; CHECK-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastss128:
-; CHECK: # BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastss128:
+; X32: # BB#0:
+; X32-NEXT: vbroadcastss %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpbroadcast_pshufb_as_vpbroadcastss128:
+; X64: # BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %xmm0
+; X64-NEXT: retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
   %2 = bitcast <4 x float> %1 to <16 x i8>
   %3 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3>)
@ -236,22 +348,34 @@ define <4 x float> @combine_vpbroadcast_pshufb_as_vpbroadcastss128(<4 x float> %
} }
define <8 x float> @combine_vpbroadcast_permd_as_vpbroadcastss256(<4 x float> %a) {
; X32-LABEL: combine_vpbroadcast_permd_as_vpbroadcastss256:
; X32: # BB#0:
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpbroadcast_permd_as_vpbroadcastss256:
; X64: # BB#0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32> zeroinitializer
%2 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %1, <8 x i32> zeroinitializer)
ret <8 x float> %2
}
define <4 x double> @combine_vpbroadcast_permd_as_vpbroadcastsd256(<2 x double> %a) {
; X32-LABEL: combine_vpbroadcast_permd_as_vpbroadcastsd256:
; X32: # BB#0:
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_vpbroadcast_permd_as_vpbroadcastsd256:
; X64: # BB#0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32> zeroinitializer
%2 = bitcast <4 x double> %1 to <8 x float>
%3 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %2, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>)
@@ -260,29 +384,45 @@ define <4 x double> @combine_vpbroadcast_permd_as_vpbroadcastsd256(<2 x double>
}
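; A vpermd whose dword mask moves aligned dword pairs is really a qword permute,
; so it can be rewritten as the shorter immediate-encoded vpermq.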
define <8 x i32> @combine_permd_as_permq(<8 x i32> %a) {
; X32-LABEL: combine_permd_as_permq:
; X32: # BB#0:
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,1]
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_permq:
; X64: # BB#0:
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,1]
; X64-NEXT: retq
%1 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 2, i32 3>)
ret <8 x i32> %1
}
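; Likewise a vpermps mask that keeps adjacent float pairs together maps to an
; immediate vpermpd.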
define <8 x float> @combine_permps_as_permpd(<8 x float> %a) {
; X32-LABEL: combine_permps_as_permpd:
; X32: # BB#0:
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,0,1]
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_permpd:
; X64: # BB#0:
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,0,1]
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>)
ret <8 x float> %1
}
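; A pshufb that keeps only the low element (64-bit here, 32-bit in the next test)
; and zeroes everything above it is a move-and-zero pattern; it lowers as a zero
; vector plus a blend that keeps only element 0.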
define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
; X32-LABEL: combine_pshufb_as_vzmovl_64:
; X32: # BB#0:
; X32-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vzmovl_64:
; X64: # BB#0:
; X64-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; X64-NEXT: retq
%1 = bitcast <4 x double> %a0 to <32 x i8>
%2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
%3 = bitcast <32 x i8> %2 to <4 x double>
@@ -290,11 +430,17 @@ define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
}
define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
; X32-LABEL: combine_pshufb_as_vzmovl_32:
; X32: # BB#0:
; X32-NEXT: vxorps %ymm1, %ymm1, %ymm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vzmovl_32:
; X64: # BB#0:
; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X64-NEXT: retq
%1 = bitcast <8 x float> %a0 to <32 x i8>
%2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
%3 = bitcast <32 x i8> %2 to <8 x float>
@@ -302,77 +448,120 @@ define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
}
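; A pshufb mask that shifts in ten zero bytes from the bottom of each 128-bit lane
; is a per-lane byte shift left, so it becomes a single vpslldq.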
define <32 x i8> @combine_pshufb_as_pslldq(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_pslldq:
; X32: # BB#0:
; X32-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pslldq:
; X64: # BB#0:
; X64-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21]
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>)
ret <32 x i8> %res0
}
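; The mirror case: keeping only the top byte of each lane and zero-filling above
; it is a per-lane byte shift right by 15, i.e. vpsrldq.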
define <32 x i8> @combine_pshufb_as_psrldq(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_psrldq:
; X32: # BB#0:
; X32-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_psrldq:
; X64: # BB#0:
; X64-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
ret <32 x i8> %res0
}
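; Word swaps confined to the low four words of each lane fit vpshuflw's immediate.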
define <32 x i8> @combine_pshufb_as_pshuflw(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_pshuflw:
; X32: # BB#0:
; X32-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pshuflw:
; X64: # BB#0:
; X64-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
ret <32 x i8> %res0
}
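; Word swaps confined to the high four words of each lane fit vpshufhw's immediate.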
define <32 x i8> @combine_pshufb_as_pshufhw(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_as_pshufhw:
; X32: # BB#0:
; X32-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_pshufhw:
; X64: # BB#0:
; X64-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14]
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
ret <32 x i8> %res0
}
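; Composing the pshuflw and pshufhw masks above swaps word pairs in both halves of
; each lane, which neither instruction can express alone, so the combined shuffle
; has to stay a full vpshufb.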
define <32 x i8> @combine_pshufb_not_as_pshufw(<32 x i8> %a0) {
; X32-LABEL: combine_pshufb_not_as_pshufw:
; X32: # BB#0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_not_as_pshufw:
; X64: # BB#0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13,18,19,16,17,22,23,20,21,26,27,24,25,30,31,28,29]
; X64-NEXT: retq
%res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
%res1 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %res0, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
ret <32 x i8> %res1
}
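; With a constant input vector the source operand folds into a constant-pool load,
; but the permute itself still runs at compile-test time: vpermd/vpermps/vpshufb
; are all still emitted. On X32 the constant pool is addressed via a .LCPI label,
; on X64 it is RIP-relative.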
define <8 x i32> @constant_fold_permd() {
; X32-LABEL: constant_fold_permd:
; X32: # BB#0:
; X32-NEXT: vmovdqa {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
; X32-NEXT: vpermd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_permd:
; X64: # BB#0:
; X64-NEXT: vmovdqa {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
; X64-NEXT: vpermd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, <8 x i32> <i32 4, i32 6, i32 2, i32 1, i32 7, i32 1, i32 5, i32 0>)
ret <8 x i32> %1
}
define <8 x float> @constant_fold_permps() {
; X32-LABEL: constant_fold_permps:
; X32: # BB#0:
; X32-NEXT: vmovaps {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
; X32-NEXT: vpermps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_permps:
; X64: # BB#0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [4,6,2,1,7,1,5,0]
; X64-NEXT: vpermps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 6, i32 2, i32 1, i32 7, i32 1, i32 5, i32 0>)
ret <8 x float> %1
}
define <32 x i8> @constant_fold_pshufb_256() {
; X32-LABEL: constant_fold_pshufb_256:
; X32: # BB#0:
; X32-NEXT: vmovdqa {{.*#+}} ymm0 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[15],zero,zero,zero,zero,zero,ymm0[7,6,17],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[31],zero,zero,zero,zero,zero,ymm0[23,22]
; X32-NEXT: retl
;
; X64-LABEL: constant_fold_pshufb_256:
; X64: # BB#0:
; X64-NEXT: vmovdqa {{.*#+}} ymm0 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,0,255,254,253,252,251,250,249,248,247,246,245,244,243,242,241]
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[15],zero,zero,zero,zero,zero,ymm0[7,6,17],zero,zero,zero,ymm0[u,u],zero,zero,ymm0[31],zero,zero,zero,zero,zero,ymm0[23,22]
; X64-NEXT: retq
%1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15>, <32 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6, i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6>)
ret <32 x i8> %1
}
