[X86] Fix some Clang-tidy modernize-use-using and Include What You Use warnings; other minor fixes (NFC).

llvm-svn: 314953
Eugene Zelenko 2017-10-05 00:33:50 +00:00
parent f48e5c9ce5
commit 60433b682f
9 changed files with 239 additions and 132 deletions
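For context, the two mechanical clang-tidy patterns applied throughout this commit are modernize-use-using (typedef rewritten as a using alias) and default member initializers replacing constructor init-lists. A minimal sketch of both, with CmovGroup and StackSize borrowed from the hunks below and OutgoingState as a hypothetical stand-in for the real OutgoingValueHandler:

#include "llvm/ADT/SmallVector.h"
#include <cstdint>

namespace llvm { class MachineInstr; }

// Before:
//   typedef SmallVector<MachineInstr *, 2> CmovGroup;
//   struct OutgoingState {
//     OutgoingState() : StackSize(0) {}
//     uint64_t StackSize;
//   };

// After modernize-use-using and in-class default member initialization:
using CmovGroup = llvm::SmallVector<llvm::MachineInstr *, 2>;

struct OutgoingState {
  OutgoingState() = default;
  uint64_t StackSize = 0; // previously set in the constructor init-list
};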

llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h

@ -1,4 +1,4 @@
//==-- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h ---------*- C++ -*-==//
//===- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -16,6 +16,22 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
namespace llvm {
/// GlobalISel PatFrag Predicates
@ -120,6 +136,7 @@ bool InstructionSelector::executeMatchTable(
}
break;
}
case GIM_CheckNumOperands: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t Expected = MatchTable[CurrentIdx++];
@ -132,6 +149,7 @@ bool InstructionSelector::executeMatchTable(
}
break;
}
case GIM_CheckImmPredicate: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t Predicate = MatchTable[CurrentIdx++];
@ -170,6 +188,7 @@ bool InstructionSelector::executeMatchTable(
}
break;
}
case GIM_CheckRegBankForClass: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
@ -186,6 +205,7 @@ bool InstructionSelector::executeMatchTable(
}
break;
}
case GIM_CheckComplexPattern: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
@ -205,6 +225,7 @@ bool InstructionSelector::executeMatchTable(
}
break;
}
case GIM_CheckConstantInt: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
@ -220,6 +241,7 @@ bool InstructionSelector::executeMatchTable(
}
break;
}
case GIM_CheckLiteralInt: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
@ -228,13 +250,14 @@ bool InstructionSelector::executeMatchTable(
<< "]->getOperand(" << OpIdx << "), Value=" << Value
<< ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
if (!OM.isCImm() || !OM.getCImm()->equalsInt(Value)) {
MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
if (!MO.isCImm() || !MO.getCImm()->equalsInt(Value)) {
if (handleReject() == RejectAndGiveUp)
return false;
}
break;
}
case GIM_CheckIntrinsicID: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
@ -243,12 +266,13 @@ bool InstructionSelector::executeMatchTable(
<< "]->getOperand(" << OpIdx << "), Value=" << Value
<< ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
if (!OM.isIntrinsicID() || OM.getIntrinsicID() != Value)
MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
if (handleReject() == RejectAndGiveUp)
return false;
break;
}
case GIM_CheckIsMBB: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
@ -261,6 +285,7 @@ bool InstructionSelector::executeMatchTable(
}
break;
}
case GIM_CheckIsSafeToFold: {
int64_t InsnID = MatchTable[CurrentIdx++];
DEBUG(dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(MIs[" << InsnID
@ -272,6 +297,7 @@ bool InstructionSelector::executeMatchTable(
}
break;
}
case GIM_Reject:
DEBUG(dbgs() << CurrentIdx << ": GIM_Reject");
if (handleReject() == RejectAndGiveUp)
@ -292,6 +318,7 @@ bool InstructionSelector::executeMatchTable(
<< "], MIs[" << OldInsnID << "], " << NewOpcode << ")\n");
break;
}
case GIR_BuildMI: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t Opcode = MatchTable[CurrentIdx++];
@ -315,6 +342,7 @@ bool InstructionSelector::executeMatchTable(
<< "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
break;
}
case GIR_CopySubReg: {
int64_t NewInsnID = MatchTable[CurrentIdx++];
int64_t OldInsnID = MatchTable[CurrentIdx++];
@ -328,6 +356,7 @@ bool InstructionSelector::executeMatchTable(
<< SubRegIdx << ")\n");
break;
}
case GIR_AddImplicitDef: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t RegNum = MatchTable[CurrentIdx++];
@ -337,6 +366,7 @@ bool InstructionSelector::executeMatchTable(
<< "], " << RegNum << ")\n");
break;
}
case GIR_AddImplicitUse: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t RegNum = MatchTable[CurrentIdx++];
@ -346,6 +376,7 @@ bool InstructionSelector::executeMatchTable(
<< "], " << RegNum << ")\n");
break;
}
case GIR_AddRegister: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t RegNum = MatchTable[CurrentIdx++];
@ -355,6 +386,7 @@ bool InstructionSelector::executeMatchTable(
<< "], " << RegNum << ")\n");
break;
}
case GIR_AddImm: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t Imm = MatchTable[CurrentIdx++];
@ -364,6 +396,7 @@ bool InstructionSelector::executeMatchTable(
<< Imm << ")\n");
break;
}
case GIR_ComplexRenderer: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t RendererID = MatchTable[CurrentIdx++];
@ -402,6 +435,7 @@ bool InstructionSelector::executeMatchTable(
<< "], " << OpIdx << ", " << RCEnum << ")\n");
break;
}
case GIR_ConstrainSelectedInstOperands: {
int64_t InsnID = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
@ -412,6 +446,7 @@ bool InstructionSelector::executeMatchTable(
<< "])\n");
break;
}
case GIR_MergeMemOperands: {
int64_t InsnID = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
@ -428,6 +463,7 @@ bool InstructionSelector::executeMatchTable(
DEBUG(dbgs() << ")\n");
break;
}
case GIR_EraseFromParent: {
int64_t InsnID = MatchTable[CurrentIdx++];
assert(State.MIs[InsnID] &&

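The InstructionSelectorImpl.h hunks above all share one table-driven shape: each GIM_*/GIR_* case pops its own operands from the flat int64_t MatchTable via CurrentIdx. A self-contained sketch of that pattern under simplified, assumed types (the OPC_* names and vector-based state are hypothetical, not the real API):

#include <cstdint>
#include <vector>

// Hypothetical opcodes standing in for the GIM_* enumerators above.
enum MatchOpcode : int64_t { OPC_CheckNumOperands, OPC_Accept };

// Each case consumes its operands from the flat stream, just as
// GIM_CheckNumOperands reads InsnID and Expected in the hunk above.
bool executeTable(const std::vector<int64_t> &MatchTable,
                  const std::vector<int> &InsnNumOperands) {
  size_t CurrentIdx = 0;
  while (true) {
    switch (MatchTable[CurrentIdx++]) {
    case OPC_CheckNumOperands: {
      int64_t InsnID = MatchTable[CurrentIdx++];   // which recorded insn
      int64_t Expected = MatchTable[CurrentIdx++]; // required operand count
      if (InsnNumOperands[InsnID] != Expected)
        return false; // reject this rule
      break;
    }
    case OPC_Accept:
      return true; // all predicates passed
    default:
      return false; // malformed table in this sketch
    }
  }
}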
llvm/lib/Target/X86/X86CallLowering.cpp

@ -1,4 +1,4 @@
//===-- llvm/lib/Target/X86/X86CallLowering.cpp - Call lowering -----------===//
//===- llvm/lib/Target/X86/X86CallLowering.cpp - Call lowering ------------===//
//
// The LLVM Compiler Infrastructure
//
@ -6,25 +6,45 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//
//===----------------------------------------------------------------------===//
#include "X86CallLowering.h"
#include "X86CallingConv.h"
#include "X86ISelLowering.h"
#include "X86InstrInfo.h"
#include "X86TargetMachine.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cassert>
#include <cstdint>
using namespace llvm;
@ -38,7 +58,6 @@ bool X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
const DataLayout &DL,
MachineRegisterInfo &MRI,
SplitArgTy PerformArgSplit) const {
const X86TargetLowering &TLI = *getTLI<X86TargetLowering>();
LLVMContext &Context = OrigArg.Ty->getContext();
@ -79,16 +98,16 @@ bool X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
}
namespace {
struct OutgoingValueHandler : public CallLowering::ValueHandler {
OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
: ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB), StackSize(0),
: ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
DL(MIRBuilder.getMF().getDataLayout()),
STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()), NumXMMRegs(0) {}
STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {}
unsigned getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0));
LLT SType = LLT::scalar(DL.getPointerSizeInBits(0));
unsigned SPReg = MRI.createGenericVirtualRegister(p0);
@ -113,7 +132,6 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
unsigned ExtReg = extendRegister(ValVReg, VA);
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
@ -124,7 +142,6 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
const CallLowering::ArgInfo &Info, CCState &State) override {
bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
StackSize = State.getNextStackOffset();
@ -142,16 +159,16 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
protected:
MachineInstrBuilder &MIB;
uint64_t StackSize;
uint64_t StackSize = 0;
const DataLayout &DL;
const X86Subtarget &STI;
unsigned NumXMMRegs;
unsigned NumXMMRegs = 0;
};
} // End anonymous namespace.
} // end anonymous namespace
bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val, unsigned VReg) const {
assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0);
@ -182,6 +199,7 @@ bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
}
namespace {
struct IncomingValueHandler : public CallLowering::ValueHandler {
IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
CCAssignFn *AssignFn)
@ -190,7 +208,6 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
unsigned getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
auto &MFI = MIRBuilder.getMF().getFrameInfo();
int FI = MFI.CreateFixedObject(Size, Offset, true);
MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
@ -203,7 +220,6 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
0);
@ -241,7 +257,7 @@ protected:
MachineInstrBuilder &MIB;
};
} // namespace
} // end anonymous namespace
bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
@ -299,7 +315,6 @@ bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
const MachineOperand &Callee,
const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();

llvm/lib/Target/X86/X86CallLowering.h

@ -1,4 +1,4 @@
//===-- llvm/lib/Target/X86/X86CallLowering.h - Call lowering -----===//
//===- llvm/lib/Target/X86/X86CallLowering.h - Call lowering ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -6,24 +6,24 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
//
/// \file
/// This file describes how to lower LLVM calls to machine code calls.
///
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_X86_X86CALLLOWERING
#define LLVM_LIB_TARGET_X86_X86CALLLOWERING
#ifndef LLVM_LIB_TARGET_X86_X86CALLLOWERING_H
#define LLVM_LIB_TARGET_X86_X86CALLLOWERING_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include <functional>
namespace llvm {
class Function;
class MachineIRBuilder;
class DataLayout;
class MachineRegisterInfo;
class X86TargetLowering;
class Value;
class X86CallLowering : public CallLowering {
public:
@ -41,12 +41,14 @@ public:
private:
/// A function of this type is used to perform value split action.
typedef std::function<void(ArrayRef<unsigned>)> SplitArgTy;
using SplitArgTy = std::function<void(ArrayRef<unsigned>)>;
bool splitToValueTypes(const ArgInfo &OrigArgInfo,
SmallVectorImpl<ArgInfo> &SplitArgs,
const DataLayout &DL, MachineRegisterInfo &MRI,
SplitArgTy SplitArg) const;
};
} // namespace llvm
#endif
} // end namespace llvm
#endif // LLVM_LIB_TARGET_X86_X86CALLLOWERING_H
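The SplitArgTy change above is the same typedef-to-using rewrite applied to a callback type. A small usage sketch of such an alias (forEachSplit and the std::vector signature are hypothetical; the real callback takes ArrayRef<unsigned>):

#include <cstdio>
#include <functional>
#include <vector>

using SplitArgTy = std::function<void(const std::vector<unsigned> &)>;

// Hand each piece's registers to the callback, loosely mirroring how
// splitToValueTypes invokes PerformArgSplit per split value.
void forEachSplit(const std::vector<std::vector<unsigned>> &Pieces,
                  SplitArgTy PerformArgSplit) {
  for (const auto &Regs : Pieces)
    PerformArgSplit(Regs);
}

int main() {
  forEachSplit({{1, 2}, {3}}, [](const std::vector<unsigned> &Regs) {
    std::printf("split into %zu regs\n", Regs.size());
  });
}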

llvm/lib/Target/X86/X86CmovConversion.cpp

@ -1,4 +1,4 @@
//====-- X86CmovConversion.cpp - Convert Cmov to Branch -------------------===//
//====- X86CmovConversion.cpp - Convert Cmov to Branch --------------------===//
//
// The LLVM Compiler Infrastructure
//
@ -6,6 +6,7 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a pass that converts X86 cmov instructions into
/// branches when profitable. This pass is conservative. It transforms if and
@ -31,26 +32,46 @@
/// 25% branch misprediction.
///
/// Note: This pass is assumed to run on SSA machine code.
//
//===----------------------------------------------------------------------===//
//
// External interfaces:
// FunctionPass *llvm::createX86CmovConverterPass();
// bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF);
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "x86-cmov-conversion"
@ -61,10 +82,11 @@ STATISTIC(NumOfLoopCandidate, "Number of CMOV-conversion profitable loops");
STATISTIC(NumOfOptimizedCmovGroups, "Number of optimized CMOV-groups");
namespace llvm {
void initializeX86CmovConverterPassPass(PassRegistry &);
}
namespace {
void initializeX86CmovConverterPassPass(PassRegistry &);
} // end namespace llvm
// This internal switch can be used to turn off the cmov/branch optimization.
static cl::opt<bool>
EnableCmovConverter("x86-cmov-converter",
@ -81,13 +103,14 @@ static cl::opt<bool> ForceMemOperand(
cl::desc("Convert cmovs to branches whenever they have memory operands."),
cl::init(true), cl::Hidden);
namespace {
/// Converts X86 cmov instructions into branches when profitable.
class X86CmovConverterPass : public MachineFunctionPass {
public:
X86CmovConverterPass() : MachineFunctionPass(ID) {
initializeX86CmovConverterPassPass(*PassRegistry::getPassRegistry());
}
~X86CmovConverterPass() {}
StringRef getPassName() const override { return "X86 cmov Conversion"; }
bool runOnMachineFunction(MachineFunction &MF) override;
@ -97,15 +120,14 @@ public:
static char ID;
private:
MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
TargetSchedModel TSchedModel;
/// List of consecutive CMOV instructions.
typedef SmallVector<MachineInstr *, 2> CmovGroup;
typedef SmallVector<CmovGroup, 2> CmovGroups;
using CmovGroup = SmallVector<MachineInstr *, 2>;
using CmovGroups = SmallVector<CmovGroup, 2>;
/// Collect all CMOV-group-candidates in \p CurrLoop and update \p
/// CmovInstGroups accordingly.
@ -132,6 +154,10 @@ private:
void convertCmovInstsToBranches(SmallVectorImpl<MachineInstr *> &Group) const;
};
} // end anonymous namespace
char X86CmovConverterPass::ID = 0;
void X86CmovConverterPass::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<MachineLoopInfo>();
@ -665,7 +691,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
MI.getOperand(X86::getCondFromCMovOpc(MI.getOpcode()) == CC ? 1 : 2)
.getReg();
// Walk back through any intermediate cmovs referenced.
for (;;) {
while (true) {
auto FRIt = FalseBBRegRewriteTable.find(FalseReg);
if (FRIt == FalseBBRegRewriteTable.end())
break;
@ -800,10 +826,6 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
MBB->erase(MIItBegin, MIItEnd);
}
} // End anonymous namespace.
char X86CmovConverterPass::ID = 0;
INITIALIZE_PASS_BEGIN(X86CmovConverterPass, DEBUG_TYPE, "X86 cmov Conversion",
false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)

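The while (true) loop introduced above chases a register through a rewrite table until no mapping remains. A self-contained sketch of that chasing pattern, with std::unordered_map standing in for the pass's FalseBBRegRewriteTable:

#include <unordered_map>

// Follow rewrite entries until the register has no further mapping, the
// same walk convertCmovInstsToBranches performs on FalseReg above.
unsigned resolveRewrites(unsigned Reg,
                         const std::unordered_map<unsigned, unsigned> &Table) {
  while (true) {
    auto It = Table.find(Reg);
    if (It == Table.end())
      break; // Reg is not rewritten any further.
    Reg = It->second;
  }
  return Reg;
}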
llvm/lib/Target/X86/X86EvexToVex.cpp

@ -1,4 +1,4 @@
//===----------------------- X86EvexToVex.cpp ----------------------------===//
//===- X86EvexToVex.cpp ---------------------------------------------------===//
// Compress EVEX instructions to VEX encoding when possible to reduce code size
//
// The LLVM Compiler Infrastructure
@ -6,7 +6,8 @@
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines the pass that goes over all AVX-512 instructions which
/// are encoded using the EVEX prefix and if possible replaces them by their
@ -16,8 +17,8 @@
/// use the xmm or the mask registers or xmm/ymm registers with indexes
/// higher than 15.
/// The pass applies code reduction on the generated code for AVX-512 instrs.
///
//===---------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
#include "InstPrinter/X86InstComments.h"
#include "MCTargetDesc/X86BaseInfo.h"
@ -54,7 +55,7 @@ namespace {
class EvexToVexInstPass : public MachineFunctionPass {
/// X86EvexToVexCompressTable - Evex to Vex encoding opcode map.
typedef DenseMap<unsigned, uint16_t> EvexToVexTableType;
using EvexToVexTableType = DenseMap<unsigned, uint16_t>;
EvexToVexTableType EvexToVex128Table;
EvexToVexTableType EvexToVex256Table;
@ -101,10 +102,10 @@ private:
const X86InstrInfo *TII;
};
char EvexToVexInstPass::ID = 0;
} // end anonymous namespace
char EvexToVexInstPass::ID = 0;
bool EvexToVexInstPass::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
@ -176,7 +177,6 @@ bool EvexToVexInstPass::CompressEvexToVexImpl(MachineInstr &MI) const {
if (It != EvexToVex256Table.end())
NewOpc = It->second;
}
// Check for EVEX_V128 or Scalar instructions.
else if (IsEVEX_V128) {
// Search for opcode in the EvexToVex128 table.

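The EvexToVexTableType alias above maps an EVEX opcode to its VEX replacement. A minimal sketch of the lookup the pass performs (std::unordered_map as a stand-in for llvm::DenseMap; returning 0 for "no compression" is an assumption of this sketch):

#include <cstdint>
#include <unordered_map>

using EvexToVexTable = std::unordered_map<unsigned, uint16_t>;

// Mirror the "It != Table.end() -> NewOpc = It->second" lookup above.
unsigned compressOpcode(unsigned EvexOpc, const EvexToVexTable &Table) {
  auto It = Table.find(EvexOpc);
  return It != Table.end() ? It->second : 0; // 0 means no VEX form found
}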
llvm/lib/Target/X86/X86InstructionSelector.cpp

@ -1,4 +1,4 @@
//===- X86InstructionSelector.cpp ----------------------------*- C++ -*-==//
//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@ -12,6 +12,9 @@
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "X86-isel"
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
@ -19,21 +22,31 @@
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "X86-isel"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cstdint>
#include <tuple>
using namespace llvm;
@ -205,7 +218,6 @@ static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
MachineRegisterInfo &MRI) const {
unsigned DstReg = I.getOperand(0).getReg();
const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
@ -432,7 +444,6 @@ unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
static void X86SelectAddress(const MachineInstr &I,
const MachineRegisterInfo &MRI,
X86AddressMode &AM) {
assert(I.getOperand(0).isReg() && "unsupported opperand.");
assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
"unsupported type.");
@ -454,13 +465,11 @@ static void X86SelectAddress(const MachineInstr &I,
// Default behavior.
AM.Base.Reg = I.getOperand(0).getReg();
return;
}
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
unsigned Opc = I.getOpcode();
assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
@ -537,7 +546,6 @@ bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
"unexpected instruction");
@ -548,7 +556,7 @@ bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
// Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
return 0;
return false;
X86AddressMode AM;
AM.GV = GV;
@ -584,7 +592,6 @@ bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
bool X86InstructionSelector::selectConstant(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
"unexpected instruction");
@ -614,14 +621,13 @@ bool X86InstructionSelector::selectConstant(MachineInstr &I,
case 32:
NewOpc = X86::MOV32ri;
break;
case 64: {
case 64:
// TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
if (isInt<32>(Val))
NewOpc = X86::MOV64ri32;
else
NewOpc = X86::MOV64ri;
break;
}
default:
llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
}
@ -633,7 +639,6 @@ bool X86InstructionSelector::selectConstant(MachineInstr &I,
bool X86InstructionSelector::selectTrunc(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_TRUNC) && "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
@ -692,7 +697,6 @@ bool X86InstructionSelector::selectTrunc(MachineInstr &I,
bool X86InstructionSelector::selectZext(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
@ -740,7 +744,6 @@ bool X86InstructionSelector::selectZext(MachineInstr &I,
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
@ -790,7 +793,6 @@ bool X86InstructionSelector::selectAnyext(MachineInstr &I,
bool X86InstructionSelector::selectCmp(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");
X86::CondCode CC;
@ -843,7 +845,6 @@ bool X86InstructionSelector::selectCmp(MachineInstr &I,
bool X86InstructionSelector::selectUadde(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
@ -903,7 +904,6 @@ bool X86InstructionSelector::selectUadde(MachineInstr &I,
bool X86InstructionSelector::selectExtract(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
"unexpected instruction");
@ -962,7 +962,6 @@ bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
const LLT DstTy = MRI.getType(DstReg);
const LLT SrcTy = MRI.getType(SrcReg);
unsigned SubIdx = X86::NoSubRegister;
@ -1001,7 +1000,6 @@ bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
const LLT DstTy = MRI.getType(DstReg);
const LLT SrcTy = MRI.getType(SrcReg);
unsigned SubIdx = X86::NoSubRegister;
@ -1039,7 +1037,6 @@ bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
bool X86InstructionSelector::selectInsert(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
@ -1098,7 +1095,6 @@ bool X86InstructionSelector::selectInsert(MachineInstr &I,
bool X86InstructionSelector::selectUnmergeValues(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
"unexpected instruction");
@ -1108,7 +1104,6 @@ bool X86InstructionSelector::selectUnmergeValues(MachineInstr &I,
unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
MachineInstr &ExtrInst =
*BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
@ -1126,7 +1121,6 @@ bool X86InstructionSelector::selectUnmergeValues(MachineInstr &I,
bool X86InstructionSelector::selectMergeValues(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
"unexpected instruction");
@ -1147,7 +1141,6 @@ bool X86InstructionSelector::selectMergeValues(MachineInstr &I,
return false;
for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
MRI.setRegBank(Tmp, RegBank);
@ -1177,7 +1170,6 @@ bool X86InstructionSelector::selectMergeValues(MachineInstr &I,
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");
const unsigned CondReg = I.getOperand(0).getReg();
@ -1199,7 +1191,6 @@ bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
bool X86InstructionSelector::materializeFP(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
"unexpected instruction");
@ -1265,7 +1256,6 @@ bool X86InstructionSelector::materializeFP(MachineInstr &I,
bool X86InstructionSelector::selectImplicitDefOrPHI(
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
I.getOpcode() == TargetOpcode::G_PHI) &&
"unexpected instruction");

llvm/lib/Target/X86/X86InterleavedAccess.cpp

@ -1,25 +1,44 @@
//===--------- X86InterleavedAccess.cpp ----------------------------------===//
//===- X86InterleavedAccess.cpp -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===--------------------------------------------------------------------===//
///
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains the X86 implementation of the interleaved accesses
/// optimization generating X86-specific instructions/intrinsics for
/// interleaved access groups.
///
//===--------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
#include "X86TargetMachine.h"
#include "X86ISelLowering.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
using namespace llvm;
namespace {
/// \brief This class holds necessary information to represent an interleaved
/// access group and supports utilities to lower the group into
/// X86-specific instructions/intrinsics.
@ -104,6 +123,7 @@ public:
/// instructions/intrinsics.
bool lowerIntoOptimizedSequence();
};
} // end anonymous namespace
bool X86InterleavedAccessGroup::isSupported() const {
@ -146,7 +166,6 @@ bool X86InterleavedAccessGroup::isSupported() const {
void X86InterleavedAccessGroup::decompose(
Instruction *VecInst, unsigned NumSubVectors, VectorType *SubVecTy,
SmallVectorImpl<Instruction *> &DecomposedVectors) {
assert((isa<LoadInst>(VecInst) || isa<ShuffleVectorInst>(VecInst)) &&
"Expected Load or Shuffle");
@ -211,7 +230,6 @@ static uint32_t Concat[] = {
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 };
// genShuffleBland - Creates a shuffle according to two vectors. This function
// only works on instructions with lanes inside 256-bit registers. According to
// the mask 'Mask' creates a new Mask 'Out' by the offset of the mask. The
@ -289,8 +307,6 @@ static void reorderSubVector(MVT VT, SmallVectorImpl<Value *> &TransposedMatrix,
for (unsigned i = 0; i < Stride; i++)
TransposedMatrix[i] =
Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat);
return;
}
void X86InterleavedAccessGroup::interleave8bitStride4VF8(
@ -336,7 +352,6 @@ void X86InterleavedAccessGroup::interleave8bitStride4VF8(
void X86InterleavedAccessGroup::interleave8bitStride4(
ArrayRef<Instruction *> Matrix, SmallVectorImpl<Value *> &TransposedMatrix,
unsigned NumOfElm) {
// Example: Assuming we start from the following vectors:
// Matrix[0]= c0 c1 c2 c3 c4 ... c31
// Matrix[1]= m0 m1 m2 m3 m4 ... m31
@ -452,7 +467,6 @@ static void setGroupSize(MVT VT, SmallVectorImpl<uint32_t> &SizeInfo) {
static void DecodePALIGNRMask(MVT VT, unsigned Imm,
SmallVectorImpl<uint32_t> &ShuffleMask,
bool AlignDirection = true, bool Unary = false) {
unsigned NumElts = VT.getVectorNumElements();
unsigned NumLanes = std::max((int)VT.getSizeInBits() / 128, 1);
unsigned NumLaneElts = NumElts / NumLanes;
@ -517,14 +531,11 @@ static void concatSubVector(Value **Vec, ArrayRef<Instruction *> InVec,
for (int i = 0; i < 3; i++)
Vec[i] = Builder.CreateShuffleVector(Vec[i], Vec[i + 3], Concat);
return;
}
void X86InterleavedAccessGroup::deinterleave8bitStride3(
ArrayRef<Instruction *> InVec, SmallVectorImpl<Value *> &TransposedMatrix,
unsigned VecElems) {
// Example: Assuming we start from the following vectors:
// Matrix[0]= a0 b0 c0 a1 b1 c1 a2 b2
// Matrix[1]= c2 a3 b3 c3 a4 b4 c4 a5
@ -584,8 +595,6 @@ void X86InterleavedAccessGroup::deinterleave8bitStride3(
Vec[0], UndefValue::get(Vec[1]->getType()), VPAlign2);
TransposedMatrix[1] = VecElems == 8 ? Vec[2] : TempVec;
TransposedMatrix[2] = VecElems == 8 ? TempVec : Vec[2];
return;
}
// group2Shuffle reorder the shuffle stride back into continuous order.
@ -613,7 +622,6 @@ static void group2Shuffle(MVT VT, SmallVectorImpl<uint32_t> &Mask,
void X86InterleavedAccessGroup::interleave8bitStride3(
ArrayRef<Instruction *> InVec, SmallVectorImpl<Value *> &TransposedMatrix,
unsigned VecElems) {
// Example: Assuming we start from the following vectors:
// Matrix[0]= a0 a1 a2 a3 a4 a5 a6 a7
// Matrix[1]= b0 b1 b2 b3 b4 b5 b6 b7
@ -670,8 +678,6 @@ void X86InterleavedAccessGroup::interleave8bitStride3(
unsigned NumOfElm = VT.getVectorNumElements();
group2Shuffle(VT, GroupSize, VPShuf);
reorderSubVector(VT, TransposedMatrix, Vec, VPShuf, NumOfElm,3, Builder);
return;
}
void X86InterleavedAccessGroup::transpose_4x4(
@ -834,4 +840,3 @@ bool X86TargetLowering::lowerInterleavedStore(StoreInst *SI,
return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
}

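The genShuffleBland comment above describes deriving a new shuffle mask 'Out' by offsetting an existing mask. A hedged sketch of just that mask-offset step (offsetMask is a hypothetical free function, not the pass's actual signature or its full per-lane logic):

#include <cstdint>
#include <vector>

// Build Out[i] = Mask[i] + Offset: the basic mask-shifting step the
// genShuffleBland comment describes, without the per-lane handling.
std::vector<uint32_t> offsetMask(const std::vector<uint32_t> &Mask,
                                 uint32_t Offset) {
  std::vector<uint32_t> Out;
  Out.reserve(Mask.size());
  for (uint32_t M : Mask)
    Out.push_back(M + Offset);
  return Out;
}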
llvm/lib/Target/X86/X86OptimizeLEAs.cpp

@ -1,4 +1,4 @@
//===-- X86OptimizeLEAs.cpp - optimize usage of LEA instructions ----------===//
//===- X86OptimizeLEAs.cpp - optimize usage of LEA instructions -----------===//
//
// The LLVM Compiler Infrastructure
//
@ -17,22 +17,36 @@
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cstdint>
#include <iterator>
using namespace llvm;
@ -60,6 +74,7 @@ static bool isSimilarDispOp(const MachineOperand &MO1,
static inline bool isLEA(const MachineInstr &MI);
namespace {
/// A key based on instruction's memory operands.
class MemOpKey {
public:
@ -92,12 +107,14 @@ public:
// Address' displacement operand.
const MachineOperand *Disp;
};
} // end anonymous namespace
/// Provide DenseMapInfo for MemOpKey.
namespace llvm {
template <> struct DenseMapInfo<MemOpKey> {
typedef DenseMapInfo<const MachineOperand *> PtrInfo;
using PtrInfo = DenseMapInfo<const MachineOperand *>;
static inline MemOpKey getEmptyKey() {
return MemOpKey(PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
@ -164,7 +181,8 @@ template <> struct DenseMapInfo<MemOpKey> {
return LHS == RHS;
}
};
}
} // end namespace llvm
/// \brief Returns a hash table key based on memory operands of \p MI. The
/// number of the first memory operand of \p MI is specified through \p N.
@ -217,6 +235,7 @@ static inline bool isLEA(const MachineInstr &MI) {
}
namespace {
class OptimizeLEAPass : public MachineFunctionPass {
public:
OptimizeLEAPass() : MachineFunctionPass(ID) {}
@ -229,7 +248,7 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
private:
typedef DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>> MemOpMap;
using MemOpMap = DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>>;
/// \brief Returns a distance between two instructions inside one basic block.
/// Negative result means, that instructions occur in reverse order.
@ -281,8 +300,10 @@ private:
static char ID;
};
} // end anonymous namespace
char OptimizeLEAPass::ID = 0;
}
FunctionPass *llvm::createX86OptimizeLEAs() { return new OptimizeLEAPass(); }

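The DenseMapInfo<MemOpKey> specialization touched above follows LLVM's standard recipe for making a custom type usable as a DenseMap key: two reserved sentinel keys, a hash, and an equality predicate. A minimal sketch for a hypothetical key type (not the MemOpKey code itself):

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include <climits>

struct MyKey {
  int A, B;
  bool operator==(const MyKey &O) const { return A == O.A && B == O.B; }
};

namespace llvm {
template <> struct DenseMapInfo<MyKey> {
  // DenseMap reserves two sentinel values for empty and erased buckets;
  // they must never collide with real keys.
  static inline MyKey getEmptyKey() { return {INT_MIN, 0}; }
  static inline MyKey getTombstoneKey() { return {INT_MIN, 1}; }
  static unsigned getHashValue(const MyKey &K) {
    return hash_combine(K.A, K.B);
  }
  static bool isEqual(const MyKey &LHS, const MyKey &RHS) {
    return LHS == RHS;
  }
};
} // end namespace llvm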
llvm/lib/Target/X86/X86VZeroUpper.cpp

@ -1,4 +1,4 @@
//===-- X86VZeroUpper.cpp - AVX vzeroupper instruction inserter -----------===//
//===- X86VZeroUpper.cpp - AVX vzeroupper instruction inserter ------------===//
//
// The LLVM Compiler Infrastructure
//
@ -17,14 +17,25 @@
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
using namespace llvm;
#define DEBUG_TYPE "x86-vzeroupper"
@ -35,23 +46,25 @@ namespace {
class VZeroUpperInserter : public MachineFunctionPass {
public:
VZeroUpperInserter() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override;
MachineFunctionProperties getRequiredProperties() const override {
return MachineFunctionProperties().set(
MachineFunctionProperties::Property::NoVRegs);
}
StringRef getPassName() const override { return "X86 vzeroupper inserter"; }
private:
void processBasicBlock(MachineBasicBlock &MBB);
void insertVZeroUpper(MachineBasicBlock::iterator I,
MachineBasicBlock &MBB);
void addDirtySuccessor(MachineBasicBlock &MBB);
typedef enum { PASS_THROUGH, EXITS_CLEAN, EXITS_DIRTY } BlockExitState;
using BlockExitState = enum { PASS_THROUGH, EXITS_CLEAN, EXITS_DIRTY };
static const char* getBlockExitStateName(BlockExitState ST);
// Core algorithm state:
@ -73,13 +86,15 @@ namespace {
// to be guarded until we discover a predecessor that
// is DIRTY_OUT.
struct BlockState {
BlockState() : ExitState(PASS_THROUGH), AddedToDirtySuccessors(false) {}
BlockExitState ExitState;
bool AddedToDirtySuccessors;
BlockExitState ExitState = PASS_THROUGH;
bool AddedToDirtySuccessors = false;
MachineBasicBlock::iterator FirstUnguardedCall;
BlockState() = default;
};
typedef SmallVector<BlockState, 8> BlockStateMap;
typedef SmallVector<MachineBasicBlock*, 8> DirtySuccessorsWorkList;
using BlockStateMap = SmallVector<BlockState, 8>;
using DirtySuccessorsWorkList = SmallVector<MachineBasicBlock *, 8>;
BlockStateMap BlockStates;
DirtySuccessorsWorkList DirtySuccessors;
@ -90,8 +105,9 @@ namespace {
static char ID;
};
char VZeroUpperInserter::ID = 0;
}
} // end anonymous namespace
char VZeroUpperInserter::ID = 0;
FunctionPass *llvm::createX86IssueVZeroUpperPass() {
return new VZeroUpperInserter();