[x86] add another test for load splitting with extracted stores (PR42305); NFC

llvm-svn: 363732
Sanjay Patel 2019-06-18 20:13:35 +00:00
parent fc5107cde6
commit 413ed69b4b
1 changed file with 69 additions and 1 deletion
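
Note: the CHECK lines in the diff below follow the output style of LLVM's utils/update_llc_test_checks.py script (the `; <prefix>-NEXT:` assertions and `# %bb.0:` block labels); presumably they were regenerated by running that script over the test file after adding the new function.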

@@ -129,7 +129,7 @@ define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
; PR42305 - https://bugs.llvm.org/show_bug.cgi?id=42305
-define void @load_split(<8 x float>* %ld, <4 x float>* %st1, <4 x float>* %st2) {
+define void @load_split(<8 x float>* %ld, <4 x float>* %st1, <4 x float>* %st2) nounwind {
; X86-SSE-LABEL: load_split:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -174,3 +174,71 @@ define void @load_split(<8 x float>* %ld, <4 x float>* %st1, <4 x float>* %st2)
store <4 x float> %t128, <4 x float>* %st2, align 1
ret void
}

define void @load_split_more(float* %src, i32* %idx, float* %dst) nounwind {
; X86-SSE-LABEL: load_split_more:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pushl %esi
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl (%edx), %esi
; X86-SSE-NEXT: movups (%ecx), %xmm0
; X86-SSE-NEXT: movups 16(%ecx), %xmm1
; X86-SSE-NEXT: movups %xmm0, (%eax,%esi,4)
; X86-SSE-NEXT: movl 4(%edx), %ecx
; X86-SSE-NEXT: movups %xmm1, (%eax,%ecx,4)
; X86-SSE-NEXT: popl %esi
; X86-SSE-NEXT: retl
;
; X86-AVX-LABEL: load_split_more:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %esi
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl (%edx), %esi
; X86-AVX-NEXT: vmovups (%ecx), %xmm0
; X86-AVX-NEXT: vmovups 16(%ecx), %xmm1
; X86-AVX-NEXT: vmovups %xmm0, (%eax,%esi,4)
; X86-AVX-NEXT: movl 4(%edx), %ecx
; X86-AVX-NEXT: vmovups %xmm1, (%eax,%ecx,4)
; X86-AVX-NEXT: popl %esi
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: load_split_more:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movslq (%rsi), %rax
; X64-SSE-NEXT: movups (%rdi), %xmm0
; X64-SSE-NEXT: movups 16(%rdi), %xmm1
; X64-SSE-NEXT: movups %xmm0, (%rdx,%rax,4)
; X64-SSE-NEXT: movslq 4(%rsi), %rax
; X64-SSE-NEXT: movups %xmm1, (%rdx,%rax,4)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: load_split_more:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movslq (%rsi), %rax
; X64-AVX-NEXT: vmovups (%rdi), %xmm0
; X64-AVX-NEXT: vmovups 16(%rdi), %xmm1
; X64-AVX-NEXT: vmovups %xmm0, (%rdx,%rax,4)
; X64-AVX-NEXT: movslq 4(%rsi), %rax
; X64-AVX-NEXT: vmovups %xmm1, (%rdx,%rax,4)
; X64-AVX-NEXT: retq
%v.i = bitcast float* %src to <8 x float>*
%tmp = load <8 x float>, <8 x float>* %v.i, align 1
%tmp1 = load i32, i32* %idx, align 4
%idx.ext = sext i32 %tmp1 to i64
%add.ptr1 = getelementptr inbounds float, float* %dst, i64 %idx.ext
%extract = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%v.i11 = bitcast float* %add.ptr1 to <4 x float>*
store <4 x float> %extract, <4 x float>* %v.i11, align 1
%arrayidx2 = getelementptr inbounds i32, i32* %idx, i64 1
%tmp2 = load i32, i32* %arrayidx2, align 4
%idx.ext3 = sext i32 %tmp2 to i64
%add.ptr4 = getelementptr inbounds float, float* %dst, i64 %idx.ext3
%extract5 = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%v.i10 = bitcast float* %add.ptr4 to <4 x float>*
store <4 x float> %extract5, <4 x float>* %v.i10, align 1
ret void
}
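
For context, the new @load_split_more function matches the pattern from PR42305: a 32-byte unaligned vector load whose two 128-bit halves are stored through independently computed addresses. A hypothetical C source that clang could lower to similar IR (illustrative only, not part of the commit; it needs -mavx to compile, while the IR test also exercises SSE-only configurations) is:

#include <immintrin.h>

void load_split_more(float *src, int *idx, float *dst) {
  /* One unaligned 256-bit load... */
  __m256 v = _mm256_loadu_ps(src);
  /* ...whose low and high 128-bit halves are extracted and stored to
     separately indexed destinations. On SSE-only targets the 256-bit
     load must itself be split into two 128-bit loads, which is the
     codegen behavior this test pins down. */
  _mm_storeu_ps(dst + idx[0], _mm256_castps256_ps128(v));
  _mm_storeu_ps(dst + idx[1], _mm256_extractf128_ps(v, 1));
}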