[x86] make test immune to oversized shift simplification

I'm not sure if this actually preserves the original intent
of this test, but if we leave it as-is, the -1 (oversized)
shift should be folded to undef and allow deleting half
of the output.

llvm-svn: 347501
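
A minimal IR sketch (not part of this commit) of the fold the message anticipates:
LLVM defines a shift of an iN value by N or more bits as producing an undef/poison
value, and -1 as an i256 amount is all-ones (2^256 - 1), far out of range, while
255 is the largest in-range amount and still has to be compiled. Function names
below are illustrative only.

    ; Oversized amount: legal to constant-fold the whole shift to undef,
    ; which would let the element-0 half of the test's output be deleted.
    define i256 @shl_oversized(i256 %x) {
      %r = shl i256 %x, -1      ; amount = 2^256 - 1 >= 256
      ret i256 %r
    }

    ; Maximum in-range amount: cannot be folded away, so the test keeps
    ; exercising the wide-shift lowering.
    define i256 @shl_max_in_range(i256 %x) {
      %r = shl i256 %x, 255     ; amount = 255 < 256
      ret i256 %r
    }
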
Sanjay Patel 2018-11-23 19:45:29 +00:00
parent 2ed41a7952
commit e0cc876363
1 changed file with 23 additions and 15 deletions

@@ -30,7 +30,9 @@ define <2 x i256> @test_shl(<2 x i256> %In) {
 ; X32-NEXT: movl %ecx, 36(%eax)
 ; X32-NEXT: shll $2, %edx
 ; X32-NEXT: movl %edx, 32(%eax)
-; X32-NEXT: movl $0, 28(%eax)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: shll $31, %ecx
+; X32-NEXT: movl %ecx, 28(%eax)
 ; X32-NEXT: movl $0, 24(%eax)
 ; X32-NEXT: movl $0, 20(%eax)
 ; X32-NEXT: movl $0, 16(%eax)
@@ -45,20 +47,22 @@ define <2 x i256> @test_shl(<2 x i256> %In) {
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi
 ; X64-NEXT: shldq $2, %rcx, %rdx
-; X64-NEXT: shldq $2, %rsi, %rcx
-; X64-NEXT: shldq $2, %r9, %rsi
+; X64-NEXT: shldq $2, %rdi, %rcx
+; X64-NEXT: shldq $2, %r9, %rdi
+; X64-NEXT: shlq $63, %rsi
 ; X64-NEXT: shlq $2, %r9
-; X64-NEXT: movq %rdx, 56(%rdi)
-; X64-NEXT: movq %rcx, 48(%rdi)
-; X64-NEXT: movq %rsi, 40(%rdi)
-; X64-NEXT: movq %r9, 32(%rdi)
+; X64-NEXT: movq %rdx, 56(%rax)
+; X64-NEXT: movq %rcx, 48(%rax)
+; X64-NEXT: movq %rdi, 40(%rax)
+; X64-NEXT: movq %r9, 32(%rax)
+; X64-NEXT: movq %rsi, 24(%rax)
 ; X64-NEXT: xorps %xmm0, %xmm0
-; X64-NEXT: movaps %xmm0, 16(%rdi)
-; X64-NEXT: movaps %xmm0, (%rdi)
+; X64-NEXT: movaps %xmm0, (%rax)
+; X64-NEXT: movq $0, 16(%rax)
 ; X64-NEXT: retq
-%Amt = insertelement <2 x i256> <i256 1, i256 2>, i256 -1, i32 0
+%Amt = insertelement <2 x i256> <i256 1, i256 2>, i256 255, i32 0
 %Out = shl <2 x i256> %In, %Amt
 ret <2 x i256> %Out
 }
@@ -110,6 +114,9 @@ define <2 x i256> @test_srl(<2 x i256> %In) {
 ; X32-NEXT: movl %ebx, 40(%eax)
 ; X32-NEXT: movl %ebp, 36(%eax)
 ; X32-NEXT: movl %ecx, 32(%eax)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: shrl $31, %ecx
+; X32-NEXT: movl %ecx, (%eax)
 ; X32-NEXT: movl $0, 28(%eax)
 ; X32-NEXT: movl $0, 24(%eax)
 ; X32-NEXT: movl $0, 20(%eax)
@@ -117,7 +124,6 @@ define <2 x i256> @test_srl(<2 x i256> %In) {
 ; X32-NEXT: movl $0, 12(%eax)
 ; X32-NEXT: movl $0, 8(%eax)
 ; X32-NEXT: movl $0, 4(%eax)
-; X32-NEXT: movl $0, (%eax)
 ; X32-NEXT: addl $8, %esp
 ; X32-NEXT: .cfi_def_cfa_offset 20
 ; X32-NEXT: popl %esi
@@ -138,17 +144,19 @@ define <2 x i256> @test_srl(<2 x i256> %In) {
 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
 ; X64-NEXT: shrdq $4, %rsi, %r9
 ; X64-NEXT: shrdq $4, %rcx, %rsi
+; X64-NEXT: shrq $63, %r8
 ; X64-NEXT: shrdq $4, %rdx, %rcx
 ; X64-NEXT: shrq $4, %rdx
 ; X64-NEXT: movq %rdx, 56(%rdi)
 ; X64-NEXT: movq %rcx, 48(%rdi)
 ; X64-NEXT: movq %rsi, 40(%rdi)
 ; X64-NEXT: movq %r9, 32(%rdi)
+; X64-NEXT: movq %r8, (%rdi)
 ; X64-NEXT: xorps %xmm0, %xmm0
 ; X64-NEXT: movaps %xmm0, 16(%rdi)
-; X64-NEXT: movaps %xmm0, (%rdi)
+; X64-NEXT: movq $0, 8(%rdi)
 ; X64-NEXT: retq
-%Amt = insertelement <2 x i256> <i256 3, i256 4>, i256 -1, i32 0
+%Amt = insertelement <2 x i256> <i256 3, i256 4>, i256 255, i32 0
 %Out = lshr <2 x i256> %In, %Amt
 ret <2 x i256> %Out
 }
@@ -242,7 +250,7 @@ define <2 x i256> @test_sra(<2 x i256> %In) {
 ; X64-NEXT: movq %r8, 8(%rdi)
 ; X64-NEXT: movq %r8, (%rdi)
 ; X64-NEXT: retq
-%Amt = insertelement <2 x i256> <i256 5, i256 6>, i256 -1, i32 0
+%Amt = insertelement <2 x i256> <i256 5, i256 6>, i256 255, i32 0
 %Out = ashr <2 x i256> %In, %Amt
 ret <2 x i256> %Out
 }