Copy utilities updated and added for MI flags

Summary: This patch adds a GlobalISel copy utility to MachineInstr for IR flags (nsw/nuw, exact, and fast-math) and updates the instruction emitter on the SDAG path to set the same flags. Some existing tests show the new behavior, and a new GlobalISel test mirrors an SDAG test for nsw/nuw handling.

Reviewers: spatel, wristow, arsenm

Reviewed By: arsenm

Subscribers: wdng

Differential Revision: https://reviews.llvm.org/D52006

llvm-svn: 342576
Michael Berg 2018-09-19 18:52:08 +00:00
parent 1a1c0ee599
commit 894c39f770
7 changed files with 284 additions and 3 deletions
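
In short, both instruction-selection paths now have one way to carry IR-level flags (nsw/nuw, exact, fast-math) onto the MachineInstrs they emit. A minimal usage sketch of the new hook; the helper name and surrounding setup are assumptions, not part of this patch:

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Hypothetical helper: `MI` is a freshly emitted MachineInstr and `I` is the
// IR instruction it was selected from; both come from the caller's context.
static void transferIRFlags(MachineInstr &MI, const Instruction &I) {
  MI.copyIRFlags(I); // New utility: maps nsw/nuw, exact, fast-math to MIFlags.
}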


@@ -1526,6 +1526,9 @@ public:
   /// not modify the MIFlags of this MachineInstr.
   uint16_t mergeFlagsWith(const MachineInstr& Other) const;

+  /// Copy all flags to MachineInstr MIFlags.
+  void copyIRFlags(const Instruction &I);
+
   /// Break any tie involving OpIdx.
   void untieRegOperand(unsigned OpIdx) {
     MachineOperand &MO = getOperand(OpIdx);


@@ -279,7 +279,12 @@ bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
   unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
   unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
   unsigned Res = getOrCreateVReg(U);
-  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
+  auto FBinOp = MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
+  if (isa<Instruction>(U)) {
+    MachineInstr *FBinOpMI = FBinOp.getInstr();
+    const Instruction &I = cast<Instruction>(U);
+    FBinOpMI->copyIRFlags(I);
+  }
   return true;
 }
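
As a style aside (not what was committed), the isa<>/cast<> pair above can be collapsed into a single dyn_cast<> with identical behavior; a sketch against the names used in the hunk:

// Equivalent guard inside translateBinaryOp, reusing `U` and `FBinOp` from
// the hunk above; dyn_cast returns null for Users that are not Instructions.
if (const Instruction *I = dyn_cast<Instruction>(&U))
  FBinOp.getInstr()->copyIRFlags(*I);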


@@ -52,6 +52,7 @@
 #include "llvm/IR/ModuleSlotTracker.h"
 #include "llvm/IR/Type.h"
 #include "llvm/IR/Value.h"
+#include "llvm/IR/Operator.h"
 #include "llvm/MC/MCInstrDesc.h"
 #include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/MC/MCSymbol.h"
@@ -517,6 +518,41 @@ uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
   return getFlags() | Other.getFlags();
 }

+void MachineInstr::copyIRFlags(const Instruction &I) {
+  // Copy the wrapping flags.
+  if (const OverflowingBinaryOperator *OB =
+          dyn_cast<OverflowingBinaryOperator>(&I)) {
+    if (OB->hasNoSignedWrap())
+      setFlag(MachineInstr::MIFlag::NoSWrap);
+    if (OB->hasNoUnsignedWrap())
+      setFlag(MachineInstr::MIFlag::NoUWrap);
+  }
+
+  // Copy the exact flag.
+  if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
+    if (PE->isExact())
+      setFlag(MachineInstr::MIFlag::IsExact);
+
+  // Copy the fast-math flags.
+  if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
+    const FastMathFlags Flags = FP->getFastMathFlags();
+    if (Flags.noNaNs())
+      setFlag(MachineInstr::MIFlag::FmNoNans);
+    if (Flags.noInfs())
+      setFlag(MachineInstr::MIFlag::FmNoInfs);
+    if (Flags.noSignedZeros())
+      setFlag(MachineInstr::MIFlag::FmNsz);
+    if (Flags.allowReciprocal())
+      setFlag(MachineInstr::MIFlag::FmArcp);
+    if (Flags.allowContract())
+      setFlag(MachineInstr::MIFlag::FmContract);
+    if (Flags.approxFunc())
+      setFlag(MachineInstr::MIFlag::FmAfn);
+    if (Flags.allowReassoc())
+      setFlag(MachineInstr::MIFlag::FmReassoc);
+  }
+}
+
 bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
   assert(!isBundledWithPred() && "Must be called on bundle header");
   for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
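
The mapping above is mechanical; a hedged sketch of the invariant it establishes for the wrapping flags (the helper name and harness are assumptions, not part of the patch):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

// Returns true when the wrap flags on `MI` agree with those on `I`, i.e. the
// state copyIRFlags is expected to establish for add/sub/mul/shl.
static bool wrapFlagsMatch(const MachineInstr &MI, const Instruction &I) {
  const auto *OB = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OB)
    return true; // Not an overflowing binary op; nothing to compare.
  return MI.getFlag(MachineInstr::MIFlag::NoSWrap) == OB->hasNoSignedWrap() &&
         MI.getFlag(MachineInstr::MIFlag::NoUWrap) == OB->hasNoUnsignedWrap();
}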


@@ -868,6 +868,15 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
     if (Flags.hasAllowReassociation())
       MI->setFlag(MachineInstr::MIFlag::FmReassoc);

+    if (Flags.hasNoUnsignedWrap())
+      MI->setFlag(MachineInstr::MIFlag::NoUWrap);
+
+    if (Flags.hasNoSignedWrap())
+      MI->setFlag(MachineInstr::MIFlag::NoSWrap);
+
+    if (Flags.hasExact())
+      MI->setFlag(MachineInstr::MIFlag::IsExact);
   }

   // Emit all of the actual operands of this instruction, adding them to the
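
On the SDAG path the flags ride on SDNodeFlags until EmitMachineNode transfers them, as above. A minimal sketch of building a node that would reach the emitter with nuw set (a generic illustration, not code from this patch):

#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Hypothetical helper: build an ADD node carrying nuw so that, at emission,
// EmitMachineNode sets MIFlag::NoUWrap on the resulting MachineInstr.
static SDValue buildNUWAdd(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                           SDValue LHS, SDValue RHS) {
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);
  return DAG.getNode(ISD::ADD, DL, VT, LHS, RHS, Flags);
}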


@@ -9,7 +9,7 @@
 ; CHECK: New block
 ; CHECK: %[[REG:([0-9]+)]]:intregs = PHI %{{.*}}, %[[REG1:([0-9]+)]]
-; CHECK: %[[REG1]]:intregs = A2_addi
+; CHECK: %[[REG1]]:intregs = nuw A2_addi
 ; CHECK: epilog:
 ; CHECK: %{{[0-9]+}}:intregs = PHI %{{.*}}, %[[REG]]


@@ -0,0 +1,228 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -global-isel | FileCheck %s

; The fundamental problem: an add separated from other arithmetic by a sign or
; zero extension can't be combined with the later instructions. However, if the
; first add is 'nsw' or 'nuw' respectively, then we can promote the extension
; ahead of that add to allow optimizations.

define i64 @add_nsw_consts(i32 %i) {
; CHECK-LABEL: add_nsw_consts:
; CHECK: # %bb.0:
; CHECK-NEXT: addl $5, %edi
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: addq $7, %rax
; CHECK-NEXT: retq
%add = add nsw i32 %i, 5
%ext = sext i32 %add to i64
%idx = add i64 %ext, 7
ret i64 %idx
}

; An x86 bonus: If we promote the sext ahead of the 'add nsw',
; we allow LEA formation and eliminate an add instruction.
define i64 @add_nsw_sext_add(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext_add:
; CHECK: # %bb.0:
; CHECK-NEXT: addl $5, %edi
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: addq %rsi, %rax
; CHECK-NEXT: retq
%add = add nsw i32 %i, 5
%ext = sext i32 %add to i64
%idx = add i64 %x, %ext
ret i64 %idx
}

; Throw in a scale (left shift) because an LEA can do that too.
; Use a negative constant (LEA displacement) to verify that's handled correctly.
define i64 @add_nsw_sext_lsh_add(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext_lsh_add:
; CHECK: # %bb.0:
; CHECK-NEXT: addl $-5, %edi
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: movq $3, %rcx
; CHECK: retq
%add = add nsw i32 %i, -5
%ext = sext i32 %add to i64
%shl = shl i64 %ext, 3
%idx = add i64 %x, %shl
ret i64 %idx
}

; Don't promote the sext if it has no users. The wider add instruction needs an
; extra byte to encode.
define i64 @add_nsw_sext(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext:
; CHECK: # %bb.0:
; CHECK-NEXT: addl $5, %edi
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: retq
%add = add nsw i32 %i, 5
%ext = sext i32 %add to i64
ret i64 %ext
}

; The typical use case: a 64-bit system where an 'int' is used as an index into an array.
define i8* @gep8(i32 %i, i8* %x) {
; CHECK-LABEL: gep8:
; CHECK: # %bb.0:
; CHECK-NEXT: addl $5, %edi
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq (%rsi,%rax), %rax
; CHECK-NEXT: retq
%add = add nsw i32 %i, 5
%ext = sext i32 %add to i64
%idx = getelementptr i8, i8* %x, i64 %ext
ret i8* %idx
}

define i16* @gep16(i32 %i, i16* %x) {
; CHECK-LABEL: gep16:
; CHECK: # %bb.0:
; CHECK-NEXT: movq $2, %rax
; CHECK-NEXT: addl $-5, %edi
; CHECK-NEXT: movslq %edi, %rcx
; CHECK-NEXT: imulq %rax, %rcx
; CHECK-NEXT: leaq (%rsi,%rcx), %rax
; CHECK-NEXT: retq
%add = add nsw i32 %i, -5
%ext = sext i32 %add to i64
%idx = getelementptr i16, i16* %x, i64 %ext
ret i16* %idx
}

define i32* @gep32(i32 %i, i32* %x) {
; CHECK-LABEL: gep32:
; CHECK: # %bb.0:
; CHECK-NEXT: movq $4, %rax
; CHECK-NEXT: addl $5, %edi
; CHECK-NEXT: movslq %edi, %rcx
; CHECK-NEXT: imulq %rax, %rcx
; CHECK-NEXT: leaq (%rsi,%rcx), %rax
; CHECK-NEXT: retq
%add = add nsw i32 %i, 5
%ext = sext i32 %add to i64
%idx = getelementptr i32, i32* %x, i64 %ext
ret i32* %idx
}

define i64* @gep64(i32 %i, i64* %x) {
; CHECK-LABEL: gep64:
; CHECK: # %bb.0:
; CHECK-NEXT: movq $8, %rax
; CHECK-NEXT: addl $-5, %edi
; CHECK-NEXT: movslq %edi, %rcx
; CHECK-NEXT: imulq %rax, %rcx
; CHECK-NEXT: leaq (%rsi,%rcx), %rax
; CHECK-NEXT: retq
%add = add nsw i32 %i, -5
%ext = sext i32 %add to i64
%idx = getelementptr i64, i64* %x, i64 %ext
ret i64* %idx
}

; LEA can't scale by 16, but the adds can still be combined into an LEA.
define i128* @gep128(i32 %i, i128* %x) {
; CHECK-LABEL: gep128:
; CHECK: # %bb.0:
; CHECK-NEXT: movq $16, %rax
; CHECK-NEXT: addl $5, %edi
; CHECK-NEXT: movslq %edi, %rcx
; CHECK-NEXT: imulq %rax, %rcx
; CHECK-NEXT: leaq (%rsi,%rcx), %rax
; CHECK-NEXT: retq
%add = add nsw i32 %i, 5
%ext = sext i32 %add to i64
%idx = getelementptr i128, i128* %x, i64 %ext
ret i128* %idx
}

; A bigger win can be achieved when there is more than one use of the
; sign extended value. In this case, we can eliminate sign extension
; instructions plus use more efficient addressing modes for memory ops.
define void @PR20134(i32* %a, i32 %i) {
; CHECK-LABEL: PR20134:
; CHECK: # %bb.0:
; CHECK: movq $4, %rax
; CHECK-NEXT: leal 1(%rsi), %ecx
; CHECK-NEXT: movslq %ecx, %rcx
; CHECK-NEXT: imulq %rax, %rcx
; CHECK-NEXT: leaq (%rdi,%rcx), %rcx
; CHECK-NEXT: leal 2(%rsi), %edx
; CHECK-NEXT: movslq %edx, %rdx
; CHECK-NEXT: imulq %rax, %rdx
; CHECK-NEXT: leaq (%rdi,%rdx), %rdx
; CHECK-NEXT: movl (%rdx), %edx
; CHECK-NEXT: addl (%rcx), %edx
; CHECK-NEXT: movslq %esi, %rcx
; CHECK-NEXT: imulq %rax, %rcx
; CHECK-NEXT: leaq (%rdi,%rcx), %rax
; CHECK-NEXT: movl %edx, (%rax)
; CHECK-NEXT: retq
%add1 = add nsw i32 %i, 1
%idx1 = sext i32 %add1 to i64
%gep1 = getelementptr i32, i32* %a, i64 %idx1
%load1 = load i32, i32* %gep1, align 4
%add2 = add nsw i32 %i, 2
%idx2 = sext i32 %add2 to i64
%gep2 = getelementptr i32, i32* %a, i64 %idx2
%load2 = load i32, i32* %gep2, align 4
%add3 = add i32 %load1, %load2
%idx3 = sext i32 %i to i64
%gep3 = getelementptr i32, i32* %a, i64 %idx3
store i32 %add3, i32* %gep3, align 4
ret void
}

; The same as @PR20134 but sign extension is replaced with zero extension
define void @PR20134_zext(i32* %a, i32 %i) {
; CHECK-LABEL: PR20134_zext:
; CHECK: # %bb.0:
; CHECK: movq $4, %rax
; CHECK-NEXT: leal 1(%rsi), %ecx
; CHECK-NEXT: imulq %rax, %rcx
; CHECK-NEXT: leaq (%rdi,%rcx), %rcx
; CHECK-NEXT: leal 2(%rsi), %edx
; CHECK-NEXT: imulq %rax, %rdx
; CHECK-NEXT: leaq (%rdi,%rdx), %rdx
; CHECK-NEXT: movl (%rdx), %edx
; CHECK-NEXT: addl (%rcx), %edx
; CHECK-NEXT: imulq %rax, %rsi
; CHECK-NEXT: leaq (%rdi,%rsi), %rax
; CHECK-NEXT: movl %edx, (%rax)
; CHECK-NEXT: retq
%add1 = add nuw i32 %i, 1
%idx1 = zext i32 %add1 to i64
%gep1 = getelementptr i32, i32* %a, i64 %idx1
%load1 = load i32, i32* %gep1, align 4
%add2 = add nuw i32 %i, 2
%idx2 = zext i32 %add2 to i64
%gep2 = getelementptr i32, i32* %a, i64 %idx2
%load2 = load i32, i32* %gep2, align 4
%add3 = add i32 %load1, %load2
%idx3 = zext i32 %i to i64
%gep3 = getelementptr i32, i32* %a, i64 %idx3
store i32 %add3, i32* %gep3, align 4
ret void
}


@@ -26,7 +26,7 @@
 ; CHECK: SUB64rr [[VREG2]], [[VREG1]]
 ; CHECK-NEXT: JNE_1 {{.*}}, debug-location [[DLOC]]{{$}}
 ; CHECK: [[VREG3:%[^ ]+]]:gr64 = PHI [[VREG2]]
-; CHECK: [[VREG4:%[^ ]+]]:gr64 = ADD64ri8 [[VREG3]], 4
+; CHECK: [[VREG4:%[^ ]+]]:gr64 = nuw ADD64ri8 [[VREG3]], 4
 ; CHECK: SUB64rr [[VREG1]], [[VREG4]]
 ; CHECK-NEXT: JNE_1 {{.*}}, debug-location [[DLOC]]{{$}}
 ; CHECK-NEXT: JMP_1 {{.*}}, debug-location [[DLOC]]{{$}}