GlobalISel: support translation of intrinsic calls.

These come in two variants for now: G_INTRINSIC and G_INTRINSIC_W_SIDE_EFFECTS.
We may decide to split the latter up with finer-grained restrictions later, if
necessary.
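
For illustration (taken from the AArch64 test added at the end of this
commit), a call to a readnone intrinsic such as

  %ptr = call i8* @llvm.returnaddress(i32 0)

is translated to the side-effect-free variant,

  [[PTR:%[0-9]+]](64) = G_INTRINSIC { p0, s32 } intrinsic(@llvm.returnaddress), 0

while a call that may access memory, like @llvm.aarch64.neon.st2, becomes a
G_INTRINSIC_W_SIDE_EFFECTS.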

llvm-svn: 277224
Tim Northover 2016-07-29 22:32:36 +00:00
parent 31b07f1445
commit 5fb414d870
9 changed files with 109 additions and 2 deletions


@@ -103,6 +103,10 @@ private:
  /// Translate an LLVM store instruction into generic IR.
  bool translateStore(const StoreInst &SI);

  /// Translate call instruction.
  /// \pre \p Inst is a call instruction.
  bool translateCall(const CallInst &Inst);

  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
  /// given generic Opcode.
  bool translateCast(unsigned Opcode, const CastInst &CI);
@@ -119,6 +123,7 @@ private:
  /// \pre \p Inst is a branch instruction.
  bool translateBr(const BranchInst &Inst);

  /// Translate return (ret) instruction.
  /// The target needs to implement CallLowering::lowerReturn for
  /// this to succeed.


@@ -204,6 +204,19 @@ public:
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildSequence(LLT Ty, unsigned Res,
                                    ArrayRef<unsigned> Ops);

  /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
  /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
  /// result register definition unless \p Res is NoReg (== 0). The second
  /// operand will be the intrinsic's ID.
  ///
  /// Callers are expected to add the required definitions and uses afterwards.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildIntrinsic(ArrayRef<LLT> Tys, Intrinsic::ID ID,
                                     unsigned Res, bool HasSideEffects);
};
} // End namespace llvm.
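
A minimal usage sketch (hypothetical caller; MIRBuilder, RetTy, ArgTy and
ResReg are assumptions, mirroring how IRTranslator::translateCall drives this
API later in the commit):

  // Hypothetical sketch: emit "ResReg = G_INTRINSIC intrinsic(@llvm.returnaddress), 0".
  // Assumes setBasicBlock/setMI has already been called on MIRBuilder, and
  // that RetTy/ArgTy are the IR Types of the result and the i32 argument.
  SmallVector<LLT, 2> Tys;
  Tys.emplace_back(*RetTy);    // result type first...
  Tys.emplace_back(*ArgTy);    // ...then each argument type
  MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(
      Tys, Intrinsic::returnaddress, ResReg, /*HasSideEffects=*/false);
  MIB.addImm(0);               // trailing uses/immediates are added by the caller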


@@ -125,6 +125,22 @@ def G_SEQUENCE : Instruction {
  let hasSideEffects = 0;
}

// Intrinsic without side effects.
def G_INTRINSIC : Instruction {
  let OutOperandList = (outs);
  let InOperandList = (ins unknown:$intrin, variable_ops);
  let hasSideEffects = 0;
}

// Intrinsic with side effects.
def G_INTRINSIC_W_SIDE_EFFECTS : Instruction {
  let OutOperandList = (outs);
  let InOperandList = (ins unknown:$intrin, variable_ops);
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
}
//------------------------------------------------------------------------------
// Branches.
//------------------------------------------------------------------------------


@@ -53,7 +53,7 @@ public:
  }

  /// Return the target intrinsic ID of a function, or 0.
-  virtual unsigned getIntrinsicID(Function *F) const;
+  virtual unsigned getIntrinsicID(const Function *F) const;

  /// Returns true if the intrinsic can be overloaded.
  virtual bool isOverloaded(unsigned IID) const = 0;


@@ -202,6 +202,12 @@ HANDLE_TARGET_OPCODE(G_STORE)
/// Generic conditional branch instruction.
HANDLE_TARGET_OPCODE(G_BRCOND)

/// Generic intrinsic use (without side effects).
HANDLE_TARGET_OPCODE(G_INTRINSIC)

/// Generic intrinsic use (with side effects).
HANDLE_TARGET_OPCODE(G_INTRINSIC_W_SIDE_EFFECTS)

/// Generic BRANCH instruction. This is an unconditional branch.
HANDLE_TARGET_OPCODE(G_BR)


@@ -19,8 +19,10 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#define DEBUG_TYPE "irtranslator"
@@ -175,6 +177,34 @@ bool IRTranslator::translateCast(unsigned Opcode, const CastInst &CI) {
  return true;
}

bool IRTranslator::translateCall(const CallInst &CI) {
  auto TII = MIRBuilder.getMF().getTarget().getIntrinsicInfo();
  const Function &F = *CI.getCalledFunction();

  // Try the generic intrinsic ID first; fall back to TargetIntrinsicInfo for
  // target-specific intrinsics.
  Intrinsic::ID ID = F.getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(&F));

  assert(ID != Intrinsic::not_intrinsic && "FIXME: support real calls");

  // Need types (starting with return) & args.
  SmallVector<LLT, 4> Tys;
  Tys.emplace_back(*CI.getType());
  for (auto &Arg : CI.arg_operands())
    Tys.emplace_back(*Arg->getType());

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(Tys, ID, Res, !CI.doesNotAccessMemory());

  // Constant integer arguments become immediate operands; everything else is
  // passed in a virtual register.
  for (auto &Arg : CI.arg_operands()) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CI->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }

  return true;
}
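
Because the builder is passed !CI.doesNotAccessMemory(), any intrinsic that
may touch memory conservatively gets the side-effecting opcode; in the test
at the end of this commit, the st2 call is checked as:

  G_INTRINSIC_W_SIDE_EFFECTS { unsized, <8 x s8>, <8 x s8>, p0 } intrinsic(@llvm.aarch64.neon.st2), [[VEC]], [[VEC]], [[PTR]]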
bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {
  assert(AI.isStaticAlloca() && "only handle static allocas now");

  MachineFunction &MF = MIRBuilder.getMF();
@@ -218,6 +248,10 @@ bool IRTranslator::translate(const Instruction &Inst) {
  case Instruction::Ret:
    return translateReturn(cast<ReturnInst>(Inst));

  // Calls
  case Instruction::Call:
    return translateCall(cast<CallInst>(Inst));

  // Casts
  case Instruction::BitCast:
    return translateBitCast(cast<CastInst>(Inst));


@@ -143,3 +143,17 @@ MachineInstrBuilder MachineIRBuilder::buildSequence(LLT Ty, unsigned Res,
    MIB.addUse(Op);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(ArrayRef<LLT> Tys,
                                                     Intrinsic::ID ID,
                                                     unsigned Res,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC,
                 Tys);

  if (Res)
    MIB.addDef(Res);
  MIB.addIntrinsicID(ID);
  return MIB;
}


@@ -22,7 +22,7 @@ TargetIntrinsicInfo::TargetIntrinsicInfo() {
TargetIntrinsicInfo::~TargetIntrinsicInfo() {
}

-unsigned TargetIntrinsicInfo::getIntrinsicID(Function *F) const {
+unsigned TargetIntrinsicInfo::getIntrinsicID(const Function *F) const {
  const ValueName *ValName = F->getValueName();
  if (!ValName)
    return 0;


@@ -252,3 +252,22 @@ define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2)
  %sum = add i64 %val1, %val2
  ret void
}

; CHECK-LABEL: name: intrinsics
; CHECK: [[CUR:%[0-9]+]](32) = COPY %w0
; CHECK: [[BITS:%[0-9]+]](32) = COPY %w1
; CHECK: [[PTR:%[0-9]+]](64) = G_INTRINSIC { p0, s32 } intrinsic(@llvm.returnaddress), 0
; CHECK: [[PTR_VEC:%[0-9]+]](64) = G_FRAME_INDEX p0 %stack.0.ptr.vec
; CHECK: [[VEC:%[0-9]+]](64) = G_LOAD { <8 x s8>, p0 } [[PTR_VEC]]
; CHECK: G_INTRINSIC_W_SIDE_EFFECTS { unsized, <8 x s8>, <8 x s8>, p0 } intrinsic(@llvm.aarch64.neon.st2), [[VEC]], [[VEC]], [[PTR]]
; CHECK: RET_ReallyLR
declare i8* @llvm.returnaddress(i32)
declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*)
define void @intrinsics(i32 %cur, i32 %bits) {
  %ptr = call i8* @llvm.returnaddress(i32 0)
  %ptr.vec = alloca <8 x i8>
  %vec = load <8 x i8>, <8 x i8>* %ptr.vec
  call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec, <8 x i8> %vec, i8* %ptr)
  ret void
}