[x86] Clean up and enhance a test around eflags copying.

This adds the basic test cases from all the EFLAGS bugs in more direct
forms. It also switches to generated check lines, and includes both
32-bit and 64-bit variations.

No functionality changes here, just setting things up to have a nice
clean asm diff in my EFLAGS patch.

llvm-svn: 329056
Chandler Carruth 2018-04-03 10:04:37 +00:00
parent 6646becd0c
commit 44a791a57a
1 changed file with 212 additions and 25 deletions

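For reference, these are the two EFLAGS save/restore idioms the generated check lines below exercise, condensed by hand from those checks into an illustrative AT&T-syntax sketch (not verbatim llc output; the register choices follow test1):

  # 32-bit save without pushf/popf: EAX is preserved around the copy.
  pushl %eax
  seto  %al            # capture OF into AL
  lahf                 # capture SF/ZF/AF/PF/CF into AH
  movl  %eax, %edx     # stash the packed flags in a scratch register
  popl  %eax
  # ... flag-clobbering code ...
  # 32-bit restore from the stashed copy.
  pushl %eax
  movl  %edx, %eax
  addb  $127, %al      # AL is 1 iff OF was set; 127+1 overflows and re-sets OF
  sahf                 # restore SF/ZF/AF/PF/CF from AH
  popl  %eax

  # 64-bit: the same tests simply spill and reload RFLAGS through a GPR.
  pushfq
  popq  %rsi           # save
  # ...
  pushq %rsi
  popfq                # restore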

@@ -1,6 +1,8 @@
; RUN: llc -o - %s | FileCheck %s
; This tests for the problem originally reported in http://llvm.org/PR25951
target triple = "i686-unknown-linux-gnu"
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -o - -mtriple=i686-unknown-unknown %s | FileCheck %s --check-prefixes=ALL,X32
; RUN: llc -o - -mtriple=x86_64-unknown-unknown %s | FileCheck %s --check-prefixes=ALL,X64
;
; Test patterns that require preserving and restoring flags.
@b = common global i8 0, align 1
@c = common global i32 0, align 4
@@ -8,13 +10,75 @@ target triple = "i686-unknown-linux-gnu"
@d = common global i8 0, align 1
@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
; CHECK-LABEL: func:
; This tests whether eax is properly saved/restored around the
; lahf/sahf instruction sequences. We make the memory operations volatile
; to prevent reordering that would avoid the spills.
declare void @external(i32)
define i32 @func() {
; A test that re-uses flags in interesting ways due to volatile accesses.
; Specifically, the first increment's flags are reused for the branch despite
; being clobbered by the second increment.
define i32 @test1() nounwind {
; X32-LABEL: test1:
; X32: # %bb.0: # %entry
; X32-NEXT: movb b, %cl
; X32-NEXT: movb %cl, %al
; X32-NEXT: incb %al
; X32-NEXT: movb %al, b
; X32-NEXT: incl c
; X32-NEXT: pushl %eax
; X32-NEXT: seto %al
; X32-NEXT: lahf
; X32-NEXT: movl %eax, %edx
; X32-NEXT: popl %eax
; X32-NEXT: movb a, %ah
; X32-NEXT: movb %ah, %ch
; X32-NEXT: incb %ch
; X32-NEXT: cmpb %cl, %ah
; X32-NEXT: sete d
; X32-NEXT: movb %ch, a
; X32-NEXT: pushl %eax
; X32-NEXT: movl %edx, %eax
; X32-NEXT: addb $127, %al
; X32-NEXT: sahf
; X32-NEXT: popl %eax
; X32-NEXT: je .LBB0_2
; X32-NEXT: # %bb.1: # %if.then
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: movsbl %al, %eax
; X32-NEXT: pushl %eax
; X32-NEXT: calll external
; X32-NEXT: addl $4, %esp
; X32-NEXT: popl %ebp
; X32-NEXT: .LBB0_2: # %if.end
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0: # %entry
; X64-NEXT: movb {{.*}}(%rip), %dil
; X64-NEXT: movl %edi, %eax
; X64-NEXT: incb %al
; X64-NEXT: movb %al, {{.*}}(%rip)
; X64-NEXT: incl {{.*}}(%rip)
; X64-NEXT: pushfq
; X64-NEXT: popq %rsi
; X64-NEXT: movb {{.*}}(%rip), %cl
; X64-NEXT: movl %ecx, %edx
; X64-NEXT: incb %dl
; X64-NEXT: cmpb %dil, %cl
; X64-NEXT: sete {{.*}}(%rip)
; X64-NEXT: movb %dl, {{.*}}(%rip)
; X64-NEXT: pushq %rsi
; X64-NEXT: popfq
; X64-NEXT: je .LBB0_2
; X64-NEXT: # %bb.1: # %if.then
; X64-NEXT: pushq %rbp
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: movsbl %al, %edi
; X64-NEXT: callq external
; X64-NEXT: popq %rbp
; X64-NEXT: .LBB0_2: # %if.end
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
entry:
%bval = load i8, i8* @b
%inc = add i8 %bval, 1
@@ -25,33 +89,156 @@ entry:
%aval = load volatile i8, i8* @a
%inc2 = add i8 %aval, 1
store volatile i8 %inc2, i8* @a
; Copy the flags produced by the incb of %inc1 into a register; we need to
; save and restore eax around it. The flags will be reused by %tobool.
; CHECK: pushl %eax
; CHECK: seto %al
; CHECK: lahf
; CHECK: movl %eax, [[REG:%[a-z]+]]
; CHECK: popl %eax
%cmp = icmp eq i8 %aval, %bval
%conv5 = zext i1 %cmp to i8
store i8 %conv5, i8* @d
%tobool = icmp eq i32 %inc1, 0
; We restore flags with an 'addb, sahf' sequence; we need to save and restore
; eax around it.
; CHECK: pushl %eax
; CHECK: movl [[REG]], %eax
; CHECK: addb $127, %al
; CHECK: sahf
; CHECK: popl %eax
br i1 %tobool, label %if.end, label %if.then
if.then:
%conv6 = sext i8 %inc to i32
%call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %conv6)
call void @external(i32 %conv6)
br label %if.end
if.end:
ret i32 0
}
declare i32 @printf(i8* nocapture readonly, ...)
; Preserve increment flags across a call.
define i32 @test2(i32* %ptr) nounwind {
; X32-LABEL: test2:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: pushl %esi
; X32-NEXT: movl 8(%ebp), %eax
; X32-NEXT: incl (%eax)
; X32-NEXT: seto %al
; X32-NEXT: lahf
; X32-NEXT: movl %eax, %esi
; X32-NEXT: pushl $42
; X32-NEXT: calll external
; X32-NEXT: addl $4, %esp
; X32-NEXT: movl %esi, %eax
; X32-NEXT: addb $127, %al
; X32-NEXT: sahf
; X32-NEXT: je .LBB1_1
; X32-NEXT: # %bb.3: # %else
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: jmp .LBB1_2
; X32-NEXT: .LBB1_1: # %then
; X32-NEXT: movl $64, %eax
; X32-NEXT: .LBB1_2: # %then
; X32-NEXT: popl %esi
; X32-NEXT: popl %ebp
; X32-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rbp
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: pushq %rbx
; X64-NEXT: pushq %rax
; X64-NEXT: incl (%rdi)
; X64-NEXT: pushfq
; X64-NEXT: popq %rbx
; X64-NEXT: movl $42, %edi
; X64-NEXT: callq external
; X64-NEXT: pushq %rbx
; X64-NEXT: popfq
; X64-NEXT: je .LBB1_1
; X64-NEXT: # %bb.3: # %else
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: jmp .LBB1_2
; X64-NEXT: .LBB1_1: # %then
; X64-NEXT: movl $64, %eax
; X64-NEXT: .LBB1_2: # %then
; X64-NEXT: addq $8, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: popq %rbp
; X64-NEXT: retq
entry:
%val = load i32, i32* %ptr
%inc = add i32 %val, 1
store i32 %inc, i32* %ptr
%cmp = icmp eq i32 %inc, 0
call void @external(i32 42)
br i1 %cmp, label %then, label %else
then:
ret i32 64
else:
ret i32 0
}
declare void @external_a()
declare void @external_b()
; This lowers to a conditional tail call instead of a conditional branch. This
; is tricky because we can only do this from a leaf function, and so we have to
; use volatile stores similar to test1 to force the save and restore of
; a condition without calling another function. We then set up subsequent calls
; in tail position.
define void @test_tail_call(i32* %ptr) nounwind optsize {
; X32-LABEL: test_tail_call:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: movl 8(%ebp), %eax
; X32-NEXT: incl (%eax)
; X32-NEXT: seto %al
; X32-NEXT: lahf
; X32-NEXT: movl %eax, %eax
; X32-NEXT: incb a
; X32-NEXT: sete d
; X32-NEXT: movl %eax, %eax
; X32-NEXT: addb $127, %al
; X32-NEXT: sahf
; X32-NEXT: je .LBB2_1
; X32-NEXT: # %bb.2: # %else
; X32-NEXT: popl %ebp
; X32-NEXT: jmp external_b # TAILCALL
; X32-NEXT: .LBB2_1: # %then
; X32-NEXT: popl %ebp
; X32-NEXT: jmp external_a # TAILCALL
;
; X64-LABEL: test_tail_call:
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rbp
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: incl (%rdi)
; X64-NEXT: pushfq
; X64-NEXT: popq %rax
; X64-NEXT: incb {{.*}}(%rip)
; X64-NEXT: sete {{.*}}(%rip)
; X64-NEXT: pushq %rax
; X64-NEXT: popfq
; X64-NEXT: je .LBB2_1
; X64-NEXT: # %bb.2: # %else
; X64-NEXT: popq %rbp
; X64-NEXT: jmp external_b # TAILCALL
; X64-NEXT: .LBB2_1: # %then
; X64-NEXT: popq %rbp
; X64-NEXT: jmp external_a # TAILCALL
entry:
%val = load i32, i32* %ptr
%inc = add i32 %val, 1
store i32 %inc, i32* %ptr
%cmp = icmp eq i32 %inc, 0
%aval = load volatile i8, i8* @a
%inc2 = add i8 %aval, 1
store volatile i8 %inc2, i8* @a
%cmp2 = icmp eq i8 %inc2, 0
%conv5 = zext i1 %cmp2 to i8
store i8 %conv5, i8* @d
br i1 %cmp, label %then, label %else
then:
tail call void @external_a()
ret void
else:
tail call void @external_b()
ret void
}
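For a compact view of what test2 above pins down, keeping the flags from an increment alive across an intervening call, here is a hand-condensed sketch of the 64-bit sequence from its check lines (again illustrative, not verbatim llc output):

  incl  (%rdi)         # ZF reflects the incremented value
  pushfq
  popq  %rbx           # save RFLAGS in a callee-saved register across the call
  movl  $42, %edi
  callq external       # may clobber EFLAGS
  pushq %rbx
  popfq                # restore RFLAGS
  je    .LBB1_1        # branch on the ZF produced by the original incl

test_tail_call covers the same restore feeding a conditional tail call (jmp external_a # TAILCALL) rather than a plain conditional branch, which is why that test has to stay a leaf function and uses volatile stores to force the save and restore.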