Re-commit r315885: [globalisel][tblgen] Add support for iPTR and implement am_unscaled* and am_indexed*

Summary:
iPTR is a pointer of subtarget-specific size to any address space. Type checks
on this type therefore derive SizeInBits from a subtarget hook: the match table
records a size of zero, and GIM_CheckPointerToAny looks up the actual pointer
width from the MachineFunction's DataLayout at selection time.
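
A minimal sketch of that lookup, using a hypothetical standalone helper rather
than the real executeMatchTable loop (the actual change is in the
GIM_CheckPointerToAny hunk below):

#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

// Hypothetical helper, not part of the patch: check that Reg carries a pointer
// of the expected width. SizeInBits == 0 is the value emitted for iPTR and is
// resolved through the MachineFunction's DataLayout.
static bool regIsPointerOfSize(const MachineFunction &MF,
                               const MachineRegisterInfo &MRI, unsigned Reg,
                               unsigned SizeInBits) {
  if (SizeInBits == 0) // iPTR: only the subtarget knows the size.
    SizeInBits = MF.getDataLayout().getPointerSizeInBits(0);
  const LLT Ty = MRI.getType(Reg);
  return Ty.isPointer() && Ty.getSizeInBits() == SizeInBits;
}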

At this point, we can import the simplest G_LOAD rules and select load
instructions using them. Further patches will add support for the predicates
needed to enable additional loads, as well as the stores.

The previous commit failed on MSVC due to a failure to convert an
initializer_list to a std::vector. This version stores the complex predicate
matchers in a static array instead of a std::vector, which MSVC will hopefully
accept.
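
The shape of the change, reduced to a self-contained sketch (the names below
are stand-ins for the generated selector code, and this does not reproduce the
MSVC diagnostic itself):

#include <cstdio>
#include <vector>

struct Selector;
using MatcherMemFn = bool (Selector::*)(int) const;

// Before: MatcherInfo held a const std::vector of member-function pointers
// that was initialized from a braced list inside a larger aggregate.
struct MatcherInfoOld {
  const std::vector<MatcherMemFn> ComplexPredicates;
};

// After: the table is a plain static array and MatcherInfo only stores a
// pointer to it, so no initializer_list-to-vector conversion is involved.
struct MatcherInfoNew {
  const MatcherMemFn *ComplexPredicates;
};

struct Selector {
  bool matchA(int) const { return true; }
  static const MatcherMemFn ComplexPredicateFns[];
};
const MatcherMemFn Selector::ComplexPredicateFns[] = {
    nullptr,           // GICP_Invalid
    &Selector::matchA, // example entry
};

int main() {
  MatcherInfoNew Info{Selector::ComplexPredicateFns};
  Selector S;
  MatcherMemFn Fn = Info.ComplexPredicates[1];
  std::printf("%d\n", (S.*Fn)(0)); // call through the pointer-to-member table
}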

Depends on D37457

Reviewers: ab, qcolombet, t.p.northover, rovka, aditya_nandakumar

Reviewed By: qcolombet

Subscribers: kristof.beyls, javed.absar, llvm-commits, igorb

Differential Revision: https://reviews.llvm.org/D37458

llvm-svn: 315887
Daniel Sanders 2017-10-16 03:36:29 +00:00
parent ce72d611af
commit ea8711b88e
8 changed files with 229 additions and 25 deletions

@@ -296,7 +296,7 @@ public:
     const I64ImmediatePredicateFn *I64ImmPredicateFns;
     const APIntImmediatePredicateFn *APIntImmPredicateFns;
     const APFloatImmediatePredicateFn *APFloatImmPredicateFns;
-    const std::vector<ComplexMatcherMemFn> ComplexPredicates;
+    const ComplexMatcherMemFn *ComplexPredicates;
   };
 protected:
@@ -341,6 +341,12 @@ protected:
   bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
                          const MachineRegisterInfo &MRI) const;

+  /// Return true if the specified operand is a G_GEP with a G_CONSTANT on the
+  /// right-hand side. GlobalISel's separation of pointer and integer types
+  /// means that we don't need to worry about G_OR with equivalent semantics.
+  bool isBaseWithConstantOffset(const MachineOperand &Root,
+                                const MachineRegisterInfo &MRI) const;
+
   bool isObviouslySafeToFold(MachineInstr &MI) const;
 };

@@ -248,10 +248,20 @@ bool InstructionSelector::executeMatchTable(
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t OpIdx = MatchTable[CurrentIdx++];
       int64_t SizeInBits = MatchTable[CurrentIdx++];
       DEBUG(dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs[" << InsnID
                    << "]->getOperand(" << OpIdx
                    << "), SizeInBits=" << SizeInBits << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");

+      // iPTR must be looked up in the target.
+      if (SizeInBits == 0) {
+        MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
+        SizeInBits = MF->getDataLayout().getPointerSizeInBits(0);
+      }
+
+      assert(SizeInBits != 0 && "Pointer size must be known");
       const LLT &Ty = MRI.getType(State.MIs[InsnID]->getOperand(OpIdx).getReg());
       if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits) {
         if (handleReject() == RejectAndGiveUp)

@@ -18,6 +18,7 @@
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/MC/MCInstrDesc.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
@@ -98,6 +99,23 @@ bool InstructionSelector::isOperandImmEqual(
   return false;
 }

+bool InstructionSelector::isBaseWithConstantOffset(
+    const MachineOperand &Root, const MachineRegisterInfo &MRI) const {
+  if (!Root.isReg())
+    return false;
+
+  MachineInstr *RootI = MRI.getVRegDef(Root.getReg());
+  if (RootI->getOpcode() != TargetOpcode::G_GEP)
+    return false;
+
+  MachineOperand &RHS = RootI->getOperand(2);
+  MachineInstr *RHSI = MRI.getVRegDef(RHS.getReg());
+  if (RHSI->getOpcode() != TargetOpcode::G_CONSTANT)
+    return false;
+
+  return true;
+}
+
 bool InstructionSelector::isObviouslySafeToFold(MachineInstr &MI) const {
   return !MI.mayLoadOrStore() && !MI.hasUnmodeledSideEffects() &&
          MI.implicit_operands().begin() == MI.implicit_operands().end();
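
As a usage illustration: the helper is protected, so it is meant to be called
from a target's derived selector. A hypothetical example (the class and the
splitBasePlusConstant method are invented for this sketch; the real callers are
the AArch64 selectAddrMode* routines further down):

#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

class ExampleInstructionSelector : public InstructionSelector {
public:
  bool select(MachineInstr &I) const override { return false; }

  // Split Root into (Base, Offset) when it has the shape
  // G_GEP(Base, G_CONSTANT Imm); isBaseWithConstantOffset does the structural
  // check, so only the operands need to be unpacked here.
  bool splitBasePlusConstant(const MachineOperand &Root,
                             const MachineRegisterInfo &MRI,
                             unsigned &BaseReg, int64_t &Offset) const {
    if (!Root.isReg() || !isBaseWithConstantOffset(Root, MRI))
      return false;
    MachineInstr *GEP = MRI.getVRegDef(Root.getReg());
    MachineInstr *Cst = MRI.getVRegDef(GEP->getOperand(2).getReg());
    BaseReg = GEP->getOperand(1).getReg();
    Offset = Cst->getOperand(1).getCImm()->getSExtValue();
    return true;
  }
};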

@@ -2516,6 +2516,22 @@ def am_indexed32 : ComplexPattern<i64, 2, "SelectAddrModeIndexed32", []>;
 def am_indexed64 : ComplexPattern<i64, 2, "SelectAddrModeIndexed64", []>;
 def am_indexed128 : ComplexPattern<i64, 2, "SelectAddrModeIndexed128", []>;

+def gi_am_indexed8 :
+    GIComplexOperandMatcher<s64, "selectAddrModeIndexed<8>">,
+    GIComplexPatternEquiv<am_indexed8>;
+def gi_am_indexed16 :
+    GIComplexOperandMatcher<s64, "selectAddrModeIndexed<16>">,
+    GIComplexPatternEquiv<am_indexed16>;
+def gi_am_indexed32 :
+    GIComplexOperandMatcher<s64, "selectAddrModeIndexed<32>">,
+    GIComplexPatternEquiv<am_indexed32>;
+def gi_am_indexed64 :
+    GIComplexOperandMatcher<s64, "selectAddrModeIndexed<64>">,
+    GIComplexPatternEquiv<am_indexed64>;
+def gi_am_indexed128 :
+    GIComplexOperandMatcher<s64, "selectAddrModeIndexed<128>">,
+    GIComplexPatternEquiv<am_indexed128>;
+
 class UImm12OffsetOperand<int Scale> : AsmOperandClass {
   let Name = "UImm12Offset" # Scale;
   let RenderMethod = "addUImm12OffsetOperands<" # Scale # ">";
@@ -3146,6 +3162,23 @@ def am_unscaled32 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled32", []>;
 def am_unscaled64 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled64", []>;
 def am_unscaled128 :ComplexPattern<i64, 2, "SelectAddrModeUnscaled128", []>;

+def gi_am_unscaled8 :
+    GIComplexOperandMatcher<s64, "selectAddrModeUnscaled8">,
+    GIComplexPatternEquiv<am_unscaled8>;
+def gi_am_unscaled16 :
+    GIComplexOperandMatcher<s64, "selectAddrModeUnscaled16">,
+    GIComplexPatternEquiv<am_unscaled16>;
+def gi_am_unscaled32 :
+    GIComplexOperandMatcher<s64, "selectAddrModeUnscaled32">,
+    GIComplexPatternEquiv<am_unscaled32>;
+def gi_am_unscaled64 :
+    GIComplexOperandMatcher<s64, "selectAddrModeUnscaled64">,
+    GIComplexPatternEquiv<am_unscaled64>;
+def gi_am_unscaled128 :
+    GIComplexOperandMatcher<s64, "selectAddrModeUnscaled128">,
+    GIComplexPatternEquiv<am_unscaled128>;
+
 class BaseLoadStoreUnscale<bits<2> sz, bit V, bits<2> opc, dag oops, dag iops,
                            string asm, list<dag> pattern>
     : I<oops, iops, asm, "\t$Rt, [$Rn, $offset]", "", pattern> {

@@ -66,6 +66,32 @@ private:
   ComplexRendererFn selectArithImmed(MachineOperand &Root) const;

+  ComplexRendererFn selectAddrModeUnscaled(MachineOperand &Root,
+                                           unsigned Size) const;
+  ComplexRendererFn selectAddrModeUnscaled8(MachineOperand &Root) const {
+    return selectAddrModeUnscaled(Root, 1);
+  }
+  ComplexRendererFn selectAddrModeUnscaled16(MachineOperand &Root) const {
+    return selectAddrModeUnscaled(Root, 2);
+  }
+  ComplexRendererFn selectAddrModeUnscaled32(MachineOperand &Root) const {
+    return selectAddrModeUnscaled(Root, 4);
+  }
+  ComplexRendererFn selectAddrModeUnscaled64(MachineOperand &Root) const {
+    return selectAddrModeUnscaled(Root, 8);
+  }
+  ComplexRendererFn selectAddrModeUnscaled128(MachineOperand &Root) const {
+    return selectAddrModeUnscaled(Root, 16);
+  }
+
+  ComplexRendererFn selectAddrModeIndexed(MachineOperand &Root,
+                                          unsigned Size) const;
+  template <int Width>
+  ComplexRendererFn selectAddrModeIndexed(MachineOperand &Root) const {
+    return selectAddrModeIndexed(Root, Width / 8);
+  }
+
   const AArch64TargetMachine &TM;
   const AArch64Subtarget &STI;
   const AArch64InstrInfo &TII;
@@ -1392,6 +1418,105 @@ AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
   }};
 }

+/// Select a "register plus unscaled signed 9-bit immediate" address. This
+/// should only match when there is an offset that is not valid for a scaled
+/// immediate addressing mode. The "Size" argument is the size in bytes of the
+/// memory reference, which is needed here to know what is valid for a scaled
+/// immediate.
+InstructionSelector::ComplexRendererFn
+AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
+                                                   unsigned Size) const {
+  MachineRegisterInfo &MRI =
+      Root.getParent()->getParent()->getParent()->getRegInfo();
+
+  if (!Root.isReg())
+    return None;
+
+  if (!isBaseWithConstantOffset(Root, MRI))
+    return None;
+
+  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
+  if (!RootDef)
+    return None;
+
+  MachineOperand &OffImm = RootDef->getOperand(2);
+  if (!OffImm.isReg())
+    return None;
+  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
+  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
+    return None;
+  int64_t RHSC;
+  MachineOperand &RHSOp1 = RHS->getOperand(1);
+  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
+    return None;
+  RHSC = RHSOp1.getCImm()->getSExtValue();
+
+  // If the offset is valid as a scaled immediate, don't match here.
+  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
+    return None;
+  if (RHSC >= -256 && RHSC < 256) {
+    MachineOperand &Base = RootDef->getOperand(1);
+    return {{
+        [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
+        [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
+    }};
+  }
+  return None;
+}
+
+/// Select a "register plus scaled unsigned 12-bit immediate" address. The
+/// "Size" argument is the size in bytes of the memory reference, which
+/// determines the scale.
+InstructionSelector::ComplexRendererFn
+AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
+                                                  unsigned Size) const {
+  MachineRegisterInfo &MRI =
+      Root.getParent()->getParent()->getParent()->getRegInfo();
+
+  if (!Root.isReg())
+    return None;
+
+  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
+  if (!RootDef)
+    return None;
+
+  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
+    return {{
+        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
+        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
+    }};
+  }
+
+  if (isBaseWithConstantOffset(Root, MRI)) {
+    MachineOperand &LHS = RootDef->getOperand(1);
+    MachineOperand &RHS = RootDef->getOperand(2);
+    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
+    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
+    if (LHSDef && RHSDef) {
+      int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
+      unsigned Scale = Log2_32(Size);
+      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
+        if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
+          LHSDef = MRI.getVRegDef(LHSDef->getOperand(1).getReg());
+        return {{
+            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
+            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
+        }};
+      }
+    }
+  }
+
+  // Before falling back to our general case, check if the unscaled
+  // instructions can handle this. If so, that's preferable.
+  if (selectAddrModeUnscaled(Root, Size).hasValue())
+    return None;
+
+  return {{
+      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
+      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
+  }};
+}
+
 namespace llvm {
 InstructionSelector *
 createAArch64InstructionSelector(const AArch64TargetMachine &TM,
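
The split between the two routines comes down to which immediates each
encoding accepts; restated as standalone predicates (helper names assumed,
mirroring the range checks above):

#include "llvm/Support/MathExtras.h"
#include <cstdint>

using namespace llvm;

// Offsets the scaled, unsigned 12-bit form accepts (e.g. LDRDui for an 8-byte
// access): a non-negative multiple of the access size with Off/Size < 4096.
static bool isScaledOffset(int64_t Off, unsigned Size) {
  return (Off & (Size - 1)) == 0 && Off >= 0 &&
         Off < (int64_t(0x1000) << Log2_32(Size));
}

// Offsets left for the unscaled, signed 9-bit form (e.g. LDURDi): anything the
// scaled form cannot encode but that still fits in [-256, 256).
static bool isUnscaledOffset(int64_t Off, unsigned Size) {
  return !isScaledOffset(Off, Size) && Off >= -256 && Off < 256;
}

int main() {
  // For an 8-byte access: 32 is scaled-form territory, 17 falls through to the
  // unscaled form, and 0x8000 is out of range for both.
  return isScaledOffset(32, 8) && !isScaledOffset(17, 8) &&
                 isUnscaledOffset(17, 8) && !isUnscaledOffset(0x8000, 8)
             ? 0
             : 1;
}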

@@ -1,9 +1,5 @@
 # RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s

-# This patch temporarily causes LD1Onev1d to match instead of LDRDui on a
-# couple functions. A patch to support iPTR will follow that fixes this.
-# XFAIL: *
-
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -533,13 +529,13 @@ registers:
 # CHECK: body:
 # CHECK: %0 = COPY %x0
-# CHECK: %1 = LD1Onev2s %0
+# CHECK: %1 = LDRDui %0, 0 :: (load 8 from %ir.addr)
 # CHECK: %d0 = COPY %1
 body: |
   bb.0:
     liveins: %x0

     %0(p0) = COPY %x0
-    %1(<2 x s32>) = G_LOAD %0 :: (load 4 from %ir.addr)
+    %1(<2 x s32>) = G_LOAD %0 :: (load 8 from %ir.addr)
     %d0 = COPY %1(<2 x s32>)
...

@@ -55,15 +55,12 @@ def HasC : Predicate<"Subtarget->hasC()"> { let RecomputePerFunction = 1; }
 // CHECK-NEXT: mutable MatcherState State;
 // CHECK-NEXT: typedef ComplexRendererFn(MyTargetInstructionSelector::*ComplexMatcherMemFn)(MachineOperand &) const;
 // CHECK-NEXT: const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> MatcherInfo;
+// CHECK-NEXT: static MyTargetInstructionSelector::ComplexMatcherMemFn ComplexPredicateFns[];
 // CHECK-NEXT: #endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL

 // CHECK-LABEL: #ifdef GET_GLOBALISEL_TEMPORARIES_INIT
 // CHECK-NEXT: , State(2),
-// CHECK-NEXT: MatcherInfo({TypeObjects, FeatureBitsets, I64ImmPredicateFns, APIntImmPredicateFns, APFloatImmPredicateFns, {
-// CHECK-NEXT:   nullptr, // GICP_Invalid
-// CHECK-NEXT:   &MyTargetInstructionSelector::selectComplexPattern, // gi_complex
-// CHECK-NEXT:   &MyTargetInstructionSelector::selectComplexPatternRR, // gi_complex_rr
-// CHECK-NEXT: }})
+// CHECK-NEXT: MatcherInfo({TypeObjects, FeatureBitsets, I64ImmPredicateFns, APIntImmPredicateFns, APFloatImmPredicateFns, ComplexPredicateFns})
 // CHECK-NEXT: #endif // ifdef GET_GLOBALISEL_TEMPORARIES_INIT

 // CHECK-LABEL: enum SubtargetFeatureBits : uint8_t {
@@ -147,6 +144,13 @@ def HasC : Predicate<"Subtarget->hasC()"> { let RecomputePerFunction = 1; }
 // CHECK-NEXT: Predicate_simm9,
 // CHECK-NEXT: };

+// CHECK-LABEL: MyTargetInstructionSelector::ComplexMatcherMemFn
+// CHECK-NEXT: MyTargetInstructionSelector::ComplexPredicateFns[] = {
+// CHECK-NEXT: nullptr, // GICP_Invalid
+// CHECK-NEXT: &MyTargetInstructionSelector::selectComplexPattern, // gi_complex
+// CHECK-NEXT: &MyTargetInstructionSelector::selectComplexPatternRR, // gi_complex_rr
+// CHECK-NEXT: }
+
 // CHECK: bool MyTargetInstructionSelector::selectImpl(MachineInstr &I) const {
 // CHECK-NEXT: MachineFunction &MF = *I.getParent()->getParent();
 // CHECK-NEXT: MachineRegisterInfo &MRI = MF.getRegInfo();

@@ -775,8 +775,8 @@ std::set<LLTCodeGen> LLTOperandMatcher::KnownTypes;
 /// no reliable means to derive the missing type information from the pattern so
 /// imported rules must test the components of a pointer separately.
 ///
-/// SizeInBits must be non-zero and the matched pointer must be that size.
-/// TODO: Add support for iPTR via SizeInBits==0 and a subtarget query.
+/// If SizeInBits is zero, then the pointer size will be obtained from the
+/// subtarget.
 class PointerToAnyOperandMatcher : public OperandPredicateMatcher {
 protected:
   unsigned SizeInBits;
@@ -979,9 +979,15 @@ public:
   Error addTypeCheckPredicate(const TypeSetByHwMode &VTy,
                               bool OperandIsAPointer) {
-    auto OpTyOrNone = VTy.isMachineValueType()
-                          ? MVTToLLT(VTy.getMachineValueType().SimpleTy)
-                          : None;
+    if (!VTy.isMachineValueType())
+      return failedImport("unsupported typeset");
+
+    if (VTy.getMachineValueType() == MVT::iPTR && OperandIsAPointer) {
+      addPredicate<PointerToAnyOperandMatcher>(0);
+      return Error::success();
+    }
+
+    auto OpTyOrNone = MVTToLLT(VTy.getMachineValueType().SimpleTy);
     if (!OpTyOrNone)
       return failedImport("unsupported type");
@@ -2977,20 +2983,16 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
         "ComplexRendererFn("
      << Target.getName()
      << "InstructionSelector::*ComplexMatcherMemFn)(MachineOperand &) const;\n"
-     << "const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> "
+     << " const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> "
         "MatcherInfo;\n"
+     << " static " << Target.getName()
+     << "InstructionSelector::ComplexMatcherMemFn ComplexPredicateFns[];\n"
      << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n\n";

   OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n"
      << ", State(" << MaxTemporaries << "),\n"
      << "MatcherInfo({TypeObjects, FeatureBitsets, I64ImmPredicateFns, "
-        "APIntImmPredicateFns, APFloatImmPredicateFns, {\n"
-     << " nullptr, // GICP_Invalid\n";
-  for (const auto &Record : ComplexPredicates)
-    OS << " &" << Target.getName()
-       << "InstructionSelector::" << Record->getValueAsString("MatcherFn")
-       << ", // " << Record->getName() << "\n";
-  OS << "}})\n"
+        "APIntImmPredicateFns, APFloatImmPredicateFns, ComplexPredicateFns})\n"
      << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n\n";

   OS << "#ifdef GET_GLOBALISEL_IMPL\n";
@@ -3109,6 +3111,16 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
   emitImmPredicates(OS, "APInt", "const APInt &", [](const Record *R) {
     return R->getValueAsBit("IsAPInt");
   });
+
+  OS << "\n";
+  OS << Target.getName() << "InstructionSelector::ComplexMatcherMemFn\n"
+     << Target.getName() << "InstructionSelector::ComplexPredicateFns[] = {\n"
+     << " nullptr, // GICP_Invalid\n";
+  for (const auto &Record : ComplexPredicates)
+    OS << " &" << Target.getName()
+       << "InstructionSelector::" << Record->getValueAsString("MatcherFn")
+       << ", // " << Record->getName() << "\n";
+  OS << "};\n\n";
+
   OS << "bool " << Target.getName()
      << "InstructionSelector::selectImpl(MachineInstr &I) const {\n"