Revert r315885: [globalisel][tblgen] Add support for iPTR and implement am_unscaled* and am_indexed*

MSVC doesn't like one of the constructors.

llvm-svn: 315886
This commit is contained in:
Daniel Sanders 2017-10-16 02:15:39 +00:00
parent 6735ea86cd
commit ce72d611af
7 changed files with 11 additions and 205 deletions

View File

@ -341,12 +341,6 @@ protected:
bool isOperandImmEqual(const MachineOperand &MO, int64_t Value, bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
const MachineRegisterInfo &MRI) const; const MachineRegisterInfo &MRI) const;
/// Return true if the specified operand is a G_GEP with a G_CONSTANT on the
/// right-hand side. GlobalISel's separation of pointer and integer types
/// means that we don't need to worry about G_OR with equivalent semantics.
bool isBaseWithConstantOffset(const MachineOperand &Root,
const MachineRegisterInfo &MRI) const;
bool isObviouslySafeToFold(MachineInstr &MI) const; bool isObviouslySafeToFold(MachineInstr &MI) const;
}; };

View File

@ -248,20 +248,10 @@ bool InstructionSelector::executeMatchTable(
int64_t InsnID = MatchTable[CurrentIdx++]; int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++]; int64_t OpIdx = MatchTable[CurrentIdx++];
int64_t SizeInBits = MatchTable[CurrentIdx++]; int64_t SizeInBits = MatchTable[CurrentIdx++];
DEBUG(dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs[" << InsnID DEBUG(dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs[" << InsnID
<< "]->getOperand(" << OpIdx << "]->getOperand(" << OpIdx
<< "), SizeInBits=" << SizeInBits << ")\n"); << "), SizeInBits=" << SizeInBits << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined"); assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
// iPTR must be looked up in the target.
if (SizeInBits == 0) {
MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
SizeInBits = MF->getDataLayout().getPointerSizeInBits(0);
}
assert(SizeInBits != 0 && "Pointer size must be known");
const LLT &Ty = MRI.getType(State.MIs[InsnID]->getOperand(OpIdx).getReg()); const LLT &Ty = MRI.getType(State.MIs[InsnID]->getOperand(OpIdx).getReg());
if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits) { if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits) {
if (handleReject() == RejectAndGiveUp) if (handleReject() == RejectAndGiveUp)

View File

@ -18,7 +18,6 @@
#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h" #include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_ostream.h"
@ -99,23 +98,6 @@ bool InstructionSelector::isOperandImmEqual(
return false; return false;
} }
// Returns true if Root is a virtual register whose defining instruction is a
// G_GEP whose second source operand (the right-hand side) is in turn defined
// by a G_CONSTANT. GlobalISel keeps pointer and integer types distinct, so an
// equivalent G_OR form does not need to be considered here.
bool InstructionSelector::isBaseWithConstantOffset(
const MachineOperand &Root, const MachineRegisterInfo &MRI) const {
// Only register operands have a defining instruction to inspect.
if (!Root.isReg())
return false;
MachineInstr *RootI = MRI.getVRegDef(Root.getReg());
if (RootI->getOpcode() != TargetOpcode::G_GEP)
return false;
// Operand 2 of G_GEP is the offset; require it to come from a G_CONSTANT.
MachineOperand &RHS = RootI->getOperand(2);
MachineInstr *RHSI = MRI.getVRegDef(RHS.getReg());
if (RHSI->getOpcode() != TargetOpcode::G_CONSTANT)
return false;
return true;
}
bool InstructionSelector::isObviouslySafeToFold(MachineInstr &MI) const { bool InstructionSelector::isObviouslySafeToFold(MachineInstr &MI) const {
return !MI.mayLoadOrStore() && !MI.hasUnmodeledSideEffects() && return !MI.mayLoadOrStore() && !MI.hasUnmodeledSideEffects() &&
MI.implicit_operands().begin() == MI.implicit_operands().end(); MI.implicit_operands().begin() == MI.implicit_operands().end();

View File

@ -2516,22 +2516,6 @@ def am_indexed32 : ComplexPattern<i64, 2, "SelectAddrModeIndexed32", []>;
def am_indexed64 : ComplexPattern<i64, 2, "SelectAddrModeIndexed64", []>; def am_indexed64 : ComplexPattern<i64, 2, "SelectAddrModeIndexed64", []>;
def am_indexed128 : ComplexPattern<i64, 2, "SelectAddrModeIndexed128", []>; def am_indexed128 : ComplexPattern<i64, 2, "SelectAddrModeIndexed128", []>;
// GlobalISel equivalents of the am_indexed* ComplexPatterns above: each def
// binds a C++ renderer (selectAddrModeIndexed instantiated for the access
// width in bits) to its SelectionDAG ComplexPattern via GIComplexPatternEquiv
// so imported patterns using am_indexedN also work in GlobalISel.
def gi_am_indexed8 :
GIComplexOperandMatcher<s64, "selectAddrModeIndexed<8>">,
GIComplexPatternEquiv<am_indexed8>;
def gi_am_indexed16 :
GIComplexOperandMatcher<s64, "selectAddrModeIndexed<16>">,
GIComplexPatternEquiv<am_indexed16>;
def gi_am_indexed32 :
GIComplexOperandMatcher<s64, "selectAddrModeIndexed<32>">,
GIComplexPatternEquiv<am_indexed32>;
def gi_am_indexed64 :
GIComplexOperandMatcher<s64, "selectAddrModeIndexed<64>">,
GIComplexPatternEquiv<am_indexed64>;
def gi_am_indexed128 :
GIComplexOperandMatcher<s64, "selectAddrModeIndexed<128>">,
GIComplexPatternEquiv<am_indexed128>;
class UImm12OffsetOperand<int Scale> : AsmOperandClass { class UImm12OffsetOperand<int Scale> : AsmOperandClass {
let Name = "UImm12Offset" # Scale; let Name = "UImm12Offset" # Scale;
let RenderMethod = "addUImm12OffsetOperands<" # Scale # ">"; let RenderMethod = "addUImm12OffsetOperands<" # Scale # ">";
@ -3162,23 +3146,6 @@ def am_unscaled32 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled32", []>;
def am_unscaled64 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled64", []>; def am_unscaled64 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled64", []>;
def am_unscaled128 :ComplexPattern<i64, 2, "SelectAddrModeUnscaled128", []>; def am_unscaled128 :ComplexPattern<i64, 2, "SelectAddrModeUnscaled128", []>;
// GlobalISel equivalents of the am_unscaled* ComplexPatterns above: each def
// ties a selectAddrModeUnscaledN renderer to the matching SelectionDAG
// ComplexPattern via GIComplexPatternEquiv so imported patterns using
// am_unscaledN also work in GlobalISel.
def gi_am_unscaled8 :
GIComplexOperandMatcher<s64, "selectAddrModeUnscaled8">,
GIComplexPatternEquiv<am_unscaled8>;
def gi_am_unscaled16 :
GIComplexOperandMatcher<s64, "selectAddrModeUnscaled16">,
GIComplexPatternEquiv<am_unscaled16>;
def gi_am_unscaled32 :
GIComplexOperandMatcher<s64, "selectAddrModeUnscaled32">,
GIComplexPatternEquiv<am_unscaled32>;
def gi_am_unscaled64 :
GIComplexOperandMatcher<s64, "selectAddrModeUnscaled64">,
GIComplexPatternEquiv<am_unscaled64>;
def gi_am_unscaled128 :
GIComplexOperandMatcher<s64, "selectAddrModeUnscaled128">,
GIComplexPatternEquiv<am_unscaled128>;
class BaseLoadStoreUnscale<bits<2> sz, bit V, bits<2> opc, dag oops, dag iops, class BaseLoadStoreUnscale<bits<2> sz, bit V, bits<2> opc, dag oops, dag iops,
string asm, list<dag> pattern> string asm, list<dag> pattern>
: I<oops, iops, asm, "\t$Rt, [$Rn, $offset]", "", pattern> { : I<oops, iops, asm, "\t$Rt, [$Rn, $offset]", "", pattern> {

View File

@ -66,32 +66,6 @@ private:
ComplexRendererFn selectArithImmed(MachineOperand &Root) const; ComplexRendererFn selectArithImmed(MachineOperand &Root) const;
/// Select a base + unscaled signed 9-bit immediate address; Size is the
/// memory access size in bytes (defined in the .cpp file).
ComplexRendererFn selectAddrModeUnscaled(MachineOperand &Root,
unsigned Size) const;
// Fixed-width wrappers referenced by name from the gi_am_unscaled*
// GIComplexOperandMatcher records in the .td file.
ComplexRendererFn selectAddrModeUnscaled8(MachineOperand &Root) const {
return selectAddrModeUnscaled(Root, 1);
}
ComplexRendererFn selectAddrModeUnscaled16(MachineOperand &Root) const {
return selectAddrModeUnscaled(Root, 2);
}
ComplexRendererFn selectAddrModeUnscaled32(MachineOperand &Root) const {
return selectAddrModeUnscaled(Root, 4);
}
ComplexRendererFn selectAddrModeUnscaled64(MachineOperand &Root) const {
return selectAddrModeUnscaled(Root, 8);
}
ComplexRendererFn selectAddrModeUnscaled128(MachineOperand &Root) const {
return selectAddrModeUnscaled(Root, 16);
}
/// Select a base + scaled unsigned 12-bit immediate address; Size is the
/// memory access size in bytes (defined in the .cpp file).
ComplexRendererFn selectAddrModeIndexed(MachineOperand &Root,
unsigned Size) const;
// Template wrapper taking the access width in bits; instantiated by the
// gi_am_indexed* records in the .td file (Width / 8 converts to bytes).
template <int Width>
ComplexRendererFn selectAddrModeIndexed(MachineOperand &Root) const {
return selectAddrModeIndexed(Root, Width / 8);
}
const AArch64TargetMachine &TM; const AArch64TargetMachine &TM;
const AArch64Subtarget &STI; const AArch64Subtarget &STI;
const AArch64InstrInfo &TII; const AArch64InstrInfo &TII;
@ -1418,105 +1392,6 @@ AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
}}; }};
} }
/// Select a "register plus unscaled signed 9-bit immediate" address. This
/// should only match when there is an offset that is not valid for a scaled
/// immediate addressing mode. The "Size" argument is the size in bytes of the
/// memory reference, which is needed here to know what is valid for a scaled
/// immediate.
InstructionSelector::ComplexRendererFn
AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
unsigned Size) const {
// Walk operand -> instruction -> basic block -> function to reach the MRI.
MachineRegisterInfo &MRI =
Root.getParent()->getParent()->getParent()->getRegInfo();
if (!Root.isReg())
return None;
// Require the G_GEP + G_CONSTANT shape checked by the base-class helper.
if (!isBaseWithConstantOffset(Root, MRI))
return None;
MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
if (!RootDef)
return None;
// Operand 2 of the G_GEP is the offset; re-verify it is a G_CONSTANT.
MachineOperand &OffImm = RootDef->getOperand(2);
if (!OffImm.isReg())
return None;
MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
return None;
int64_t RHSC;
MachineOperand &RHSOp1 = RHS->getOperand(1);
// Reject constants wider than 64 bits so getSExtValue() is safe.
if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
return None;
RHSC = RHSOp1.getCImm()->getSExtValue();
// If the offset is valid as a scaled immediate, don't match here.
if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
return None;
// [-256, 256) is the range of the signed 9-bit unscaled immediate.
if (RHSC >= -256 && RHSC < 256) {
MachineOperand &Base = RootDef->getOperand(1);
// Render as (base register, raw byte offset).
return {{
[=](MachineInstrBuilder &MIB) { MIB.add(Base); },
[=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
}};
}
return None;
}
/// Select a "register plus scaled unsigned 12-bit immediate" address. The
/// "Size" argument is the size in bytes of the memory reference, which
/// determines the scale.
InstructionSelector::ComplexRendererFn
AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
unsigned Size) const {
// Walk operand -> instruction -> basic block -> function to reach the MRI.
MachineRegisterInfo &MRI =
Root.getParent()->getParent()->getParent()->getRegInfo();
if (!Root.isReg())
return None;
MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
if (!RootDef)
return None;
// A bare frame index is rendered directly with a zero offset.
if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
return {{
[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
[=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
}};
}
// G_GEP base + G_CONSTANT offset: fold the offset if it fits the scaled
// unsigned 12-bit immediate form for this access size.
if (isBaseWithConstantOffset(Root, MRI)) {
MachineOperand &LHS = RootDef->getOperand(1);
MachineOperand &RHS = RootDef->getOperand(2);
MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
if (LHSDef && RHSDef) {
int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
unsigned Scale = Log2_32(Size);
// Offset must be Size-aligned, non-negative, and < 0x1000 after scaling.
if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
LHSDef = MRI.getVRegDef(LHSDef->getOperand(1).getReg());
// Render as (base, offset scaled down to units of Size).
return {{
[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
[=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
}};
}
}
}
// Before falling back to our general case, check if the unscaled
// instructions can handle this. If so, that's preferable.
if (selectAddrModeUnscaled(Root, Size).hasValue())
return None;
// General case: use the root register directly with a zero offset.
return {{
[=](MachineInstrBuilder &MIB) { MIB.add(Root); },
[=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
}};
}
namespace llvm { namespace llvm {
InstructionSelector * InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &TM, createAArch64InstructionSelector(const AArch64TargetMachine &TM,

View File

@ -1,5 +1,9 @@
# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s # RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
# This patch temporarily causes LD1Onev1d to match instead of LDRDui on a
# couple functions. A patch to support iPTR will follow that fixes this.
# XFAIL: *
--- | --- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@ -529,13 +533,13 @@ registers:
# CHECK: body: # CHECK: body:
# CHECK: %0 = COPY %x0 # CHECK: %0 = COPY %x0
# CHECK: %1 = LDRDui %0, 0 :: (load 8 from %ir.addr) # CHECK: %1 = LD1Onev2s %0
# CHECK: %d0 = COPY %1 # CHECK: %d0 = COPY %1
body: | body: |
bb.0: bb.0:
liveins: %x0 liveins: %x0
%0(p0) = COPY %x0 %0(p0) = COPY %x0
%1(<2 x s32>) = G_LOAD %0 :: (load 8 from %ir.addr) %1(<2 x s32>) = G_LOAD %0 :: (load 4 from %ir.addr)
%d0 = COPY %1(<2 x s32>) %d0 = COPY %1(<2 x s32>)
... ...

View File

@ -775,8 +775,8 @@ std::set<LLTCodeGen> LLTOperandMatcher::KnownTypes;
/// no reliable means to derive the missing type information from the pattern so /// no reliable means to derive the missing type information from the pattern so
/// imported rules must test the components of a pointer separately. /// imported rules must test the components of a pointer separately.
/// ///
/// If SizeInBits is zero, then the pointer size will be obtained from the /// SizeInBits must be non-zero and the matched pointer must be that size.
/// subtarget. /// TODO: Add support for iPTR via SizeInBits==0 and a subtarget query.
class PointerToAnyOperandMatcher : public OperandPredicateMatcher { class PointerToAnyOperandMatcher : public OperandPredicateMatcher {
protected: protected:
unsigned SizeInBits; unsigned SizeInBits;
@ -979,15 +979,9 @@ public:
Error addTypeCheckPredicate(const TypeSetByHwMode &VTy, Error addTypeCheckPredicate(const TypeSetByHwMode &VTy,
bool OperandIsAPointer) { bool OperandIsAPointer) {
if (!VTy.isMachineValueType()) auto OpTyOrNone = VTy.isMachineValueType()
return failedImport("unsupported typeset"); ? MVTToLLT(VTy.getMachineValueType().SimpleTy)
: None;
if (VTy.getMachineValueType() == MVT::iPTR && OperandIsAPointer) {
addPredicate<PointerToAnyOperandMatcher>(0);
return Error::success();
}
auto OpTyOrNone = MVTToLLT(VTy.getMachineValueType().SimpleTy);
if (!OpTyOrNone) if (!OpTyOrNone)
return failedImport("unsupported type"); return failedImport("unsupported type");