diff --git a/llvm/autoconf/configure.ac b/llvm/autoconf/configure.ac index f2c7714a2a94..2054548979d4 100644 --- a/llvm/autoconf/configure.ac +++ b/llvm/autoconf/configure.ac @@ -225,6 +225,7 @@ AC_CACHE_CHECK([target architecture],[llvm_cv_target_arch], arm-*) llvm_cv_target_arch="ARM" ;; mips-*) llvm_cv_target_arch="Mips" ;; pic16-*) llvm_cv_target_arch="PIC16" ;; + xcore-*) llvm_cv_target_arch="XCore" ;; *) llvm_cv_target_arch="Unknown" ;; esac]) @@ -332,6 +333,7 @@ else ARM) AC_SUBST(TARGET_HAS_JIT,0) ;; Mips) AC_SUBST(TARGET_HAS_JIT,0) ;; PIC16) AC_SUBST(TARGET_HAS_JIT,0) ;; + XCore) AC_SUBST(TARGET_HAS_JIT,0) ;; *) AC_SUBST(TARGET_HAS_JIT,0) ;; esac fi @@ -381,7 +383,7 @@ AC_ARG_ENABLE([targets],AS_HELP_STRING([--enable-targets], [Build specific host targets: all,host-only,{target-name} (default=all)]),, enableval=all) case "$enableval" in - all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha IA64 ARM Mips CellSPU PIC16 CBackend MSIL CppBackend" ;; + all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha IA64 ARM Mips CellSPU PIC16 XCore CBackend MSIL CppBackend" ;; host-only) case "$llvm_cv_target_arch" in x86) TARGETS_TO_BUILD="X86" ;; @@ -394,6 +396,7 @@ case "$enableval" in Mips) TARGETS_TO_BUILD="Mips" ;; CellSPU|SPU) TARGETS_TO_BUILD="CellSPU" ;; PIC16) TARGETS_TO_BUILD="PIC16" ;; + XCore) TARGETS_TO_BUILD="XCore" ;; *) AC_MSG_ERROR([Can not set target to build]) ;; esac ;; @@ -409,6 +412,7 @@ case "$enableval" in mips) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; spu) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;; pic16) TARGETS_TO_BUILD="PIC16 $TARGETS_TO_BUILD" ;; + xcore) TARGETS_TO_BUILD="XCore $TARGETS_TO_BUILD" ;; cbe) TARGETS_TO_BUILD="CBackend $TARGETS_TO_BUILD" ;; msil) TARGETS_TO_BUILD="MSIL $TARGETS_TO_BUILD" ;; cpp) TARGETS_TO_BUILD="CppBackend $TARGETS_TO_BUILD" ;; diff --git a/llvm/configure b/llvm/configure index 154e0931d217..f4faa671324a 100755 --- a/llvm/configure +++ b/llvm/configure @@ -2389,6 +2389,7 @@ else arm-*) llvm_cv_target_arch="ARM" ;; mips-*) llvm_cv_target_arch="Mips" ;; pic16-*) llvm_cv_target_arch="PIC16" ;; + xcore-*) llvm_cv_target_arch="XCore" ;; *) llvm_cv_target_arch="Unknown" ;; esac fi @@ -4810,6 +4811,8 @@ else Mips) TARGET_HAS_JIT=0 ;; PIC16) TARGET_HAS_JIT=0 + ;; + XCore) TARGET_HAS_JIT=0 ;; *) TARGET_HAS_JIT=0 ;; @@ -4892,7 +4895,7 @@ else fi case "$enableval" in - all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha IA64 ARM Mips CellSPU PIC16 CBackend MSIL CppBackend" ;; + all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha IA64 ARM Mips CellSPU PIC16 XCore CBackend MSIL CppBackend" ;; host-only) case "$llvm_cv_target_arch" in x86) TARGETS_TO_BUILD="X86" ;; @@ -4905,6 +4908,7 @@ case "$enableval" in Mips) TARGETS_TO_BUILD="Mips" ;; CellSPU|SPU) TARGETS_TO_BUILD="CellSPU" ;; PIC16) TARGETS_TO_BUILD="PIC16" ;; + XCore) TARGETS_TO_BUILD="XCore" ;; *) { { echo "$as_me:$LINENO: error: Can not set target to build" >&5 echo "$as_me: error: Can not set target to build" >&2;} { (exit 1); exit 1; }; } ;; @@ -4922,6 +4926,7 @@ echo "$as_me: error: Can not set target to build" >&2;} mips) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; spu) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;; pic16) TARGETS_TO_BUILD="PIC16 $TARGETS_TO_BUILD" ;; + xcore) TARGETS_TO_BUILD="XCore $TARGETS_TO_BUILD" ;; cbe) TARGETS_TO_BUILD="CBackend $TARGETS_TO_BUILD" ;; msil) TARGETS_TO_BUILD="MSIL $TARGETS_TO_BUILD" ;; cpp) TARGETS_TO_BUILD="CppBackend $TARGETS_TO_BUILD" ;; @@ -10827,7 +10832,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > 
conftest.$ac_ext < conftest.$ac_ext + echo '#line 12979 "configure"' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? @@ -14689,11 +14694,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:14692: $lt_compile\"" >&5) + (eval echo "\"\$as_me:14697: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:14696: \$? = $ac_status" >&5 + echo "$as_me:14701: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -14957,11 +14962,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:14960: $lt_compile\"" >&5) + (eval echo "\"\$as_me:14965: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:14964: \$? = $ac_status" >&5 + echo "$as_me:14969: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -15061,11 +15066,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:15064: $lt_compile\"" >&5) + (eval echo "\"\$as_me:15069: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:15068: \$? = $ac_status" >&5 + echo "$as_me:15073: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -17513,7 +17518,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext < conftest.$ac_ext <&5) + (eval echo "\"\$as_me:19989: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:19988: \$? = $ac_status" >&5 + echo "$as_me:19993: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -20085,11 +20090,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:20088: $lt_compile\"" >&5) + (eval echo "\"\$as_me:20093: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:20092: \$? = $ac_status" >&5 + echo "$as_me:20097: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -21655,11 +21660,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:21658: $lt_compile\"" >&5) + (eval echo "\"\$as_me:21663: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:21662: \$? = $ac_status" >&5 + echo "$as_me:21667: \$? 
= $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -21759,11 +21764,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:21762: $lt_compile\"" >&5) + (eval echo "\"\$as_me:21767: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:21766: \$? = $ac_status" >&5 + echo "$as_me:21771: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -23994,11 +23999,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:23997: $lt_compile\"" >&5) + (eval echo "\"\$as_me:24002: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:24001: \$? = $ac_status" >&5 + echo "$as_me:24006: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -24262,11 +24267,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:24265: $lt_compile\"" >&5) + (eval echo "\"\$as_me:24270: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:24269: \$? = $ac_status" >&5 + echo "$as_me:24274: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -24366,11 +24371,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:24369: $lt_compile\"" >&5) + (eval echo "\"\$as_me:24374: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:24373: \$? = $ac_status" >&5 + echo "$as_me:24378: \$? 
= $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized diff --git a/llvm/lib/Target/XCore/CMakeLists.txt b/llvm/lib/Target/XCore/CMakeLists.txt new file mode 100644 index 000000000000..a7aba14a7a14 --- /dev/null +++ b/llvm/lib/Target/XCore/CMakeLists.txt @@ -0,0 +1,23 @@ +set(LLVM_TARGET_DEFINITIONS XCore.td) + +tablegen(XCoreGenRegisterInfo.h.inc -gen-register-desc-header) +tablegen(XCoreGenRegisterNames.inc -gen-register-enums) +tablegen(XCoreGenRegisterInfo.inc -gen-register-desc) +tablegen(XCoreGenInstrNames.inc -gen-instr-enums) +tablegen(XCoreGenInstrInfo.inc -gen-instr-desc) +tablegen(XCoreGenAsmWriter.inc -gen-asm-writer) +tablegen(XCoreGenDAGISel.inc -gen-dag-isel) +tablegen(XCoreGenCallingConv.inc -gen-callingconv) +tablegen(XCoreGenSubtarget.inc -gen-subtarget) + +add_llvm_target(XCore + XCoreAsmPrinter.cpp + XCoreFrameInfo.cpp + XCoreInstrInfo.cpp + XCoreISelDAGToDAG.cpp + XCoreISelLowering.cpp + XCoreRegisterInfo.cpp + XCoreSubtarget.cpp + XCoreTargetAsmInfo.cpp + XCoreTargetMachine.cpp + ) diff --git a/llvm/lib/Target/XCore/README.txt b/llvm/lib/Target/XCore/README.txt new file mode 100644 index 000000000000..deaeb0f2a93b --- /dev/null +++ b/llvm/lib/Target/XCore/README.txt @@ -0,0 +1,8 @@ +To-do +----- + +* Instruction encodings +* Tailcalls +* Investigate loop alignment +* Add builtins +* Make better use of lmul / macc diff --git a/llvm/lib/Target/XCore/XCore.h b/llvm/lib/Target/XCore/XCore.h new file mode 100644 index 000000000000..347ac47ac3e7 --- /dev/null +++ b/llvm/lib/Target/XCore/XCore.h @@ -0,0 +1,38 @@ +//===-- XCore.h - Top-level interface for XCore representation --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the entry points for global functions defined in the LLVM +// XCore back-end. +// +//===----------------------------------------------------------------------===// + +#ifndef TARGET_XCORE_H +#define TARGET_XCORE_H + +namespace llvm { + class FunctionPass; + class TargetMachine; + class XCoreTargetMachine; + class raw_ostream; + + FunctionPass *createXCoreISelDag(XCoreTargetMachine &TM); + FunctionPass *createXCoreCodePrinterPass(raw_ostream &OS, + XCoreTargetMachine &TM); +} // end namespace llvm; + +// Defines symbolic names for XCore registers. This defines a mapping from +// register name to register number. +// +#include "XCoreGenRegisterNames.inc" + +// Defines symbolic names for the XCore instructions. +// +#include "XCoreGenInstrNames.inc" + +#endif diff --git a/llvm/lib/Target/XCore/XCore.td b/llvm/lib/Target/XCore/XCore.td new file mode 100644 index 000000000000..39c4226b616b --- /dev/null +++ b/llvm/lib/Target/XCore/XCore.td @@ -0,0 +1,62 @@ +//===- XCore.td - Describe the XCore Target Machine --------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces which we are implementing
+//===----------------------------------------------------------------------===//
+
+include "../Target.td"
+
+//===----------------------------------------------------------------------===//
+// Descriptions
+//===----------------------------------------------------------------------===//
+
+include "XCoreRegisterInfo.td"
+include "XCoreInstrInfo.td"
+include "XCoreCallingConv.td"
+
+def XCoreInstrInfo : InstrInfo {
+  let TSFlagsFields = [];
+  let TSFlagsShifts = [];
+}
+
+//===----------------------------------------------------------------------===//
+// XCore Subtarget features.
+//===----------------------------------------------------------------------===//
+
+def FeatureXS1A
+  : SubtargetFeature<"xs1a", "IsXS1A", "true",
+                     "Enable XS1A instructions">;
+
+def FeatureXS1B
+  : SubtargetFeature<"xs1b", "IsXS1B", "true",
+                     "Enable XS1B instructions">;
+
+//===----------------------------------------------------------------------===//
+// XCore processors supported.
+//===----------------------------------------------------------------------===//
+
+class Proc<string Name, list<SubtargetFeature> Features>
+ : Processor<Name, NoItineraries, Features>;
+
+def : Proc<"generic", [FeatureXS1A]>;
+def : Proc<"xs1a-generic", [FeatureXS1A]>;
+def : Proc<"xs1b-generic", [FeatureXS1B]>;
+
+//===----------------------------------------------------------------------===//
+// Declare the target which we are implementing
+//===----------------------------------------------------------------------===//
+
+def XCore : Target {
+  // Pull in Instruction Info:
+  let InstructionSet = XCoreInstrInfo;
+}
diff --git a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
new file mode 100644
index 000000000000..519b38bdfa98
--- /dev/null
+++ b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -0,0 +1,459 @@
+//===-- XCoreAsmPrinter.cpp - XCore LLVM assembly writer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to the XAS-format XCore assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "asm-printer"
+#include "XCore.h"
+#include "XCoreInstrInfo.h"
+#include "XCoreSubtarget.h"
+#include "XCoreTargetMachine.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/DwarfWriter.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/Mangler.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cctype>
+using namespace llvm;
+
+STATISTIC(EmittedInsts, "Number of machine instrs printed");
+
+static cl::opt<std::string> FileDirective("xcore-file-directive", cl::Optional,
+  cl::desc("Output a file directive into the assembly file"),
+  cl::Hidden,
+  cl::value_desc("filename"),
+  cl::init(""));
+
+static cl::opt<unsigned> MaxThreads("xcore-max-threads", cl::Optional,
+  cl::desc("Maximum number of threads (for emulation thread-local storage)"),
+  cl::Hidden,
+  cl::value_desc("number"),
+  cl::init(8));
+
+namespace {
+  struct VISIBILITY_HIDDEN XCoreAsmPrinter : public AsmPrinter {
+    XCoreAsmPrinter(raw_ostream &O, XCoreTargetMachine &TM,
+                    const TargetAsmInfo *T)
+      : AsmPrinter(O, TM, T), DW(O, this, T),
+        Subtarget(*TM.getSubtargetImpl()) { }
+
+    DwarfWriter DW;
+    const XCoreSubtarget &Subtarget;
+
+    virtual const char *getPassName() const {
+      return "XCore Assembly Printer";
+    }
+
+    void printMemOperand(const MachineInstr *MI, int opNum);
+    void printOperand(const MachineInstr *MI, int opNum);
+    bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+                         unsigned AsmVariant, const char *ExtraCode);
+
+    void emitFileDirective(const std::string &filename);
+    void emitGlobalDirective(const std::string &name);
+    void emitExternDirective(const std::string &name);
+
+    void emitArrayBound(const std::string &name, const GlobalVariable *GV);
+    void emitGlobal(const GlobalVariable *GV);
+
+    void emitFunctionStart(MachineFunction &MF);
+    void emitFunctionEnd(MachineFunction &MF);
+
+    bool printInstruction(const MachineInstr *MI); // autogenerated.
+    void printMachineInstruction(const MachineInstr *MI);
+    bool runOnMachineFunction(MachineFunction &F);
+    bool doInitialization(Module &M);
+    bool doFinalization(Module &M);
+
+    void getAnalysisUsage(AnalysisUsage &AU) const {
+      AsmPrinter::getAnalysisUsage(AU);
+      AU.setPreservesAll();
+      AU.addRequired<MachineModuleInfo>();
+    }
+  };
+} // end of anonymous namespace
+
+#include "XCoreGenAsmWriter.inc"
+
+/// createXCoreCodePrinterPass - Returns a pass that prints the XCore
+/// assembly code for a MachineFunction to the given output stream,
+/// using the given target machine description. This should work
+/// regardless of whether the function is in SSA form.
+///
+FunctionPass *llvm::createXCoreCodePrinterPass(raw_ostream &o,
+                                               XCoreTargetMachine &tm) {
+  return new XCoreAsmPrinter(o, tm, tm.getTargetAsmInfo());
+}
+
+// PrintEscapedString - Print each character of the specified string, escaping
+// it if it is not printable or if it is an escape char.
+static void PrintEscapedString(const std::string &Str, raw_ostream &Out) { + for (unsigned i = 0, e = Str.size(); i != e; ++i) { + unsigned char C = Str[i]; + if (isprint(C) && C != '"' && C != '\\') { + Out << C; + } else { + Out << '\\' + << (char) ((C/16 < 10) ? ( C/16 +'0') : ( C/16 -10+'A')) + << (char)(((C&15) < 10) ? ((C&15)+'0') : ((C&15)-10+'A')); + } + } +} + +void XCoreAsmPrinter:: +emitFileDirective(const std::string &name) +{ + O << "\t.file\t\""; + PrintEscapedString(name, O); + O << "\"\n"; +} + +void XCoreAsmPrinter:: +emitGlobalDirective(const std::string &name) +{ + O << TAI->getGlobalDirective() << name; + O << "\n"; +} + +void XCoreAsmPrinter:: +emitExternDirective(const std::string &name) +{ + O << "\t.extern\t" << name; + O << '\n'; +} + +void XCoreAsmPrinter:: +emitArrayBound(const std::string &name, const GlobalVariable *GV) +{ + assert((GV->hasExternalLinkage() || + GV->hasWeakLinkage()) || + GV->hasLinkOnceLinkage() && "Unexpected linkage"); + if (const ArrayType *ATy = dyn_cast( + cast(GV->getType())->getElementType())) + { + O << TAI->getGlobalDirective() << name << ".globound" << "\n"; + O << TAI->getSetDirective() << name << ".globound" << "," + << ATy->getNumElements() << "\n"; + if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage()) { + // TODO Use COMDAT groups for LinkOnceLinkage + O << TAI->getWeakDefDirective() << name << ".globound" << "\n"; + } + } +} + +void XCoreAsmPrinter:: +emitGlobal(const GlobalVariable *GV) +{ + const TargetData *TD = TM.getTargetData(); + + if (GV->hasInitializer()) { + // Check to see if this is a special global used by LLVM, if so, emit it. + if (EmitSpecialLLVMGlobal(GV)) + return; + + SwitchToSection(TAI->SectionForGlobal(GV)); + + std::string name = Mang->getValueName(GV); + Constant *C = GV->getInitializer(); + unsigned Align = (unsigned)TD->getPreferredTypeAlignmentShift(C->getType()); + + // Mark the start of the global + O << "\t.cc_top " << name << ".data," << name << "\n"; + + switch (GV->getLinkage()) { + case GlobalValue::AppendingLinkage: + cerr << "AppendingLinkage is not supported by this target!\n"; + abort(); + case GlobalValue::LinkOnceLinkage: + case GlobalValue::WeakLinkage: + case GlobalValue::ExternalLinkage: + emitArrayBound(name, GV); + emitGlobalDirective(name); + // TODO Use COMDAT groups for LinkOnceLinkage + if (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage()) { + O << TAI->getWeakDefDirective() << name << "\n"; + } + // FALL THROUGH + case GlobalValue::InternalLinkage: + break; + case GlobalValue::GhostLinkage: + cerr << "Should not have any unmaterialized functions!\n"; + abort(); + case GlobalValue::DLLImportLinkage: + cerr << "DLLImport linkage is not supported by this target!\n"; + abort(); + case GlobalValue::DLLExportLinkage: + cerr << "DLLExport linkage is not supported by this target!\n"; + abort(); + default: + assert(0 && "Unknown linkage type!"); + } + + EmitAlignment(Align, GV, 2); + + unsigned Size = TD->getABITypeSize(C->getType()); + if (GV->isThreadLocal()) { + Size *= MaxThreads; + } + if (TAI->hasDotTypeDotSizeDirective()) { + O << "\t.type " << name << ",@object\n"; + O << "\t.size " << name << "," << Size << "\n"; + } + O << name << ":\n"; + + EmitGlobalConstant(C); + if (GV->isThreadLocal()) { + for (unsigned i = 1; i < MaxThreads; ++i) { + EmitGlobalConstant(C); + } + } + if (Size < 4) { + // The ABI requires that unsigned scalar types smaller than 32 bits + // are are padded to 32 bits. 
+ EmitZeros(4 - Size); + } + + // Mark the end of the global + O << "\t.cc_bottom " << name << ".data\n"; + } else { + if (GV->hasExternalWeakLinkage()) + ExtWeakSymbols.insert(GV); + } +} + +/// Emit the directives on the start of functions +void XCoreAsmPrinter:: +emitFunctionStart(MachineFunction &MF) +{ + // Print out the label for the function. + const Function *F = MF.getFunction(); + + SwitchToSection(TAI->SectionForGlobal(F)); + + // Mark the start of the function + O << "\t.cc_top " << CurrentFnName << ".function," << CurrentFnName << "\n"; + + switch (F->getLinkage()) { + default: assert(0 && "Unknown linkage type!"); + case Function::InternalLinkage: // Symbols default to internal. + break; + case Function::ExternalLinkage: + emitGlobalDirective(CurrentFnName); + break; + case Function::LinkOnceLinkage: + case Function::WeakLinkage: + // TODO Use COMDAT groups for LinkOnceLinkage + O << TAI->getGlobalDirective() << CurrentFnName << "\n"; + O << TAI->getWeakDefDirective() << CurrentFnName << "\n"; + break; + } + // (1 << 1) byte aligned + EmitAlignment(1, F, 1); + if (TAI->hasDotTypeDotSizeDirective()) { + O << "\t.type " << CurrentFnName << ",@function\n"; + } + O << CurrentFnName << ":\n"; +} + +/// Emit the directives on the end of functions +void XCoreAsmPrinter:: +emitFunctionEnd(MachineFunction &MF) +{ + // Mark the end of the function + O << "\t.cc_bottom " << CurrentFnName << ".function\n"; +} + +/// runOnMachineFunction - This uses the printMachineInstruction() +/// method to print assembly for each instruction. +/// +bool XCoreAsmPrinter::runOnMachineFunction(MachineFunction &MF) +{ + SetupMachineFunction(MF); + + // Print out constants referenced by the function + EmitConstantPool(MF.getConstantPool()); + + // Print out jump tables referenced by the function + EmitJumpTableInfo(MF.getJumpTableInfo(), MF); + + // What's my mangled name? + CurrentFnName = Mang->getValueName(MF.getFunction()); + + // Emit the function start directives + emitFunctionStart(MF); + + // Emit pre-function debug information. + DW.BeginFunction(&MF); + + // Print out code for the function. + for (MachineFunction::const_iterator I = MF.begin(), E = MF.end(); + I != E; ++I) { + + // Print a label for the basic block. + if (I != MF.begin()) { + printBasicBlockLabel(I, true , true); + O << '\n'; + } + + for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end(); + II != E; ++II) { + // Print the assembly for the instruction. + O << "\t"; + printMachineInstruction(II); + } + + // Each Basic Block is separated by a newline + O << '\n'; + } + + // Emit function end directives + emitFunctionEnd(MF); + + // Emit post-function debug information. + DW.EndFunction(&MF); + + // We didn't modify anything. 
+ return false; +} + +void XCoreAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum) +{ + printOperand(MI, opNum); + + if (MI->getOperand(opNum+1).isImm() + && MI->getOperand(opNum+1).getImm() == 0) + return; + + O << "+"; + printOperand(MI, opNum+1); +} + +void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum) { + const MachineOperand &MO = MI->getOperand(opNum); + switch (MO.getType()) { + case MachineOperand::MO_Register: + if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) + O << TM.getRegisterInfo()->get(MO.getReg()).AsmName; + else + assert(0 && "not implemented"); + break; + case MachineOperand::MO_Immediate: + O << MO.getImm(); + break; + case MachineOperand::MO_MachineBasicBlock: + printBasicBlockLabel(MO.getMBB()); + break; + case MachineOperand::MO_GlobalAddress: + O << Mang->getValueName(MO.getGlobal()); + if (MO.getGlobal()->hasExternalWeakLinkage()) + ExtWeakSymbols.insert(MO.getGlobal()); + break; + case MachineOperand::MO_ExternalSymbol: + O << MO.getSymbolName(); + break; + case MachineOperand::MO_ConstantPoolIndex: + O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() + << '_' << MO.getIndex(); + break; + case MachineOperand::MO_JumpTableIndex: + O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() + << '_' << MO.getIndex(); + break; + default: + assert(0 && "not implemented"); + } +} + +/// PrintAsmOperand - Print out an operand for an inline asm expression. +/// +bool XCoreAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + unsigned AsmVariant, + const char *ExtraCode) { + printOperand(MI, OpNo); + return false; +} + +void XCoreAsmPrinter::printMachineInstruction(const MachineInstr *MI) { + ++EmittedInsts; + + // Check for mov mnemonic + unsigned src, dst; + if (TM.getInstrInfo()->isMoveInstr(*MI, src, dst)) { + O << "\tmov "; + O << TM.getRegisterInfo()->get(dst).AsmName; + O << ", "; + O << TM.getRegisterInfo()->get(src).AsmName; + O << "\n"; + return; + } + if (printInstruction(MI)) { + return; + } + assert(0 && "Unhandled instruction in asm writer!"); +} + +bool XCoreAsmPrinter::doInitialization(Module &M) { + bool Result = AsmPrinter::doInitialization(M); + + if (!FileDirective.empty()) { + emitFileDirective(FileDirective); + } + + // Print out type strings for external functions here + for (Module::const_iterator I = M.begin(), E = M.end(); + I != E; ++I) { + if (I->isDeclaration() && !I->isIntrinsic()) { + switch (I->getLinkage()) { + default: + assert(0 && "Unexpected linkage"); + case Function::ExternalWeakLinkage: + ExtWeakSymbols.insert(I); + // fallthrough + case Function::ExternalLinkage: + break; + } + } + } + + // Emit initial debug information. + DW.BeginModule(&M); + + DW.SetModuleInfo(getAnalysisToUpdate()); + return Result; +} + +bool XCoreAsmPrinter::doFinalization(Module &M) { + + // Print out module-level global variables. + for (Module::const_global_iterator I = M.global_begin(), E = M.global_end(); + I != E; ++I) { + emitGlobal(I); + } + + // Emit final debug information. + DW.EndModule(); + + return AsmPrinter::doFinalization(M); +} diff --git a/llvm/lib/Target/XCore/XCoreCallingConv.td b/llvm/lib/Target/XCore/XCoreCallingConv.td new file mode 100644 index 000000000000..8107e329bd58 --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreCallingConv.td @@ -0,0 +1,33 @@ +//===- XCoreCallingConv.td - Calling Conventions for XCore -*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// This describes the calling conventions for XCore architecture. +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// XCore Return Value Calling Convention +//===----------------------------------------------------------------------===// +def RetCC_XCore : CallingConv<[ + // i32 are returned in registers R0, R1, R2, R3 + CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>> +]>; + +//===----------------------------------------------------------------------===// +// XCore Argument Calling Conventions +//===----------------------------------------------------------------------===// +def CC_XCore : CallingConv<[ + // Promote i8/i16 arguments to i32. + CCIfType<[i8, i16], CCPromoteToType>, + + // The first 4 integer arguments are passed in integer registers. + CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>, + + // Integer values get stored in stack slots that are 4 bytes in + // size and 4-byte aligned. + CCIfType<[i32], CCAssignToStack<4, 4>> +]>; diff --git a/llvm/lib/Target/XCore/XCoreFrameInfo.cpp b/llvm/lib/Target/XCore/XCoreFrameInfo.cpp new file mode 100644 index 000000000000..f50dc96c6ba9 --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreFrameInfo.cpp @@ -0,0 +1,27 @@ +//===-- XCoreFrameInfo.cpp - Frame info for XCore Target ---------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains XCore frame information that doesn't fit anywhere else +// cleanly... +// +//===----------------------------------------------------------------------===// + +#include "XCore.h" +#include "XCoreFrameInfo.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// XCoreFrameInfo: +//===----------------------------------------------------------------------===// + +XCoreFrameInfo::XCoreFrameInfo(const TargetMachine &tm): + TargetFrameInfo(TargetFrameInfo::StackGrowsDown, 4, 0) +{ + // Do nothing +} diff --git a/llvm/lib/Target/XCore/XCoreFrameInfo.h b/llvm/lib/Target/XCore/XCoreFrameInfo.h new file mode 100644 index 000000000000..2c67577181ec --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreFrameInfo.h @@ -0,0 +1,34 @@ +//===-- XCoreFrameInfo.h - Frame info for XCore Target -----------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains XCore frame information that doesn't fit anywhere else +// cleanly... +// +//===----------------------------------------------------------------------===// + +#ifndef XCOREFRAMEINFO_H +#define XCOREFRAMEINFO_H + +#include "llvm/Target/TargetFrameInfo.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + class XCoreFrameInfo: public TargetFrameInfo { + + public: + XCoreFrameInfo(const TargetMachine &tm); + + //! 
Stack slot size (4 bytes) + static int stackSlotSize() { + return 4; + } + }; +} + +#endif // XCOREFRAMEINFO_H diff --git a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp new file mode 100644 index 000000000000..338f98e3853a --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp @@ -0,0 +1,228 @@ +//===-- XCoreISelDAGToDAG.cpp - A dag to dag inst selector for XCore ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines an instruction selector for the XCore target. +// +//===----------------------------------------------------------------------===// + +#include "XCore.h" +#include "XCoreISelLowering.h" +#include "XCoreTargetMachine.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/Intrinsics.h" +#include "llvm/CallingConv.h" +#include "llvm/Constants.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/Target/TargetLowering.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include +#include +using namespace llvm; + +/// XCoreDAGToDAGISel - XCore specific code to select XCore machine +/// instructions for SelectionDAG operations. +/// +namespace { + class XCoreDAGToDAGISel : public SelectionDAGISel { + XCoreTargetLowering &Lowering; + const XCoreSubtarget &Subtarget; + + public: + XCoreDAGToDAGISel(XCoreTargetMachine &TM) + : SelectionDAGISel(*TM.getTargetLowering()), + Lowering(*TM.getTargetLowering()), + Subtarget(*TM.getSubtargetImpl()) { } + + SDNode *Select(SDValue Op); + + /// getI32Imm - Return a target constant with the specified value, of type + /// i32. + inline SDValue getI32Imm(unsigned Imm) { + return CurDAG->getTargetConstant(Imm, MVT::i32); + } + + // Complex Pattern Selectors. + bool SelectADDRspii(SDValue Op, SDValue Addr, SDValue &Base, + SDValue &Offset); + bool SelectADDRdpii(SDValue Op, SDValue Addr, SDValue &Base, + SDValue &Offset); + bool SelectADDRcpii(SDValue Op, SDValue Addr, SDValue &Base, + SDValue &Offset); + + virtual void InstructionSelect(); + + virtual const char *getPassName() const { + return "XCore DAG->DAG Pattern Instruction Selection"; + } + + // Include the pieces autogenerated from the target description. + #include "XCoreGenDAGISel.inc" + }; +} // end anonymous namespace + +/// createXCoreISelDag - This pass converts a legalized DAG into a +/// XCore-specific DAG, ready for instruction scheduling. 
+///
+FunctionPass *llvm::createXCoreISelDag(XCoreTargetMachine &TM) {
+  return new XCoreDAGToDAGISel(TM);
+}
+
+bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Op, SDValue Addr,
+                                       SDValue &Base, SDValue &Offset) {
+  FrameIndexSDNode *FIN = 0;
+  if (FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+    return true;
+  }
+  if (Addr.getOpcode() == ISD::ADD) {
+    ConstantSDNode *CN = 0;
+    if ((FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0)))
+      && (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+      && (CN->getSExtValue() % 4 == 0)) {
+      // Constant word offset from frame pointer
+      Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
+      return true;
+    }
+  }
+  return false;
+}
+
+bool XCoreDAGToDAGISel::SelectADDRdpii(SDValue Op, SDValue Addr,
+                                       SDValue &Base, SDValue &Offset) {
+  if (Addr.getOpcode() == XCoreISD::DPRelativeWrapper) {
+    Base = Addr.getOperand(0);
+    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+    return true;
+  }
+  if (Addr.getOpcode() == ISD::ADD) {
+    ConstantSDNode *CN = 0;
+    if ((Addr.getOperand(0).getOpcode() == XCoreISD::DPRelativeWrapper)
+      && (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+      && (CN->getSExtValue() % 4 == 0)) {
+      // Constant word offset from a object in the data region
+      Base = Addr.getOperand(0).getOperand(0);
+      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
+      return true;
+    }
+  }
+  return false;
+}
+
+bool XCoreDAGToDAGISel::SelectADDRcpii(SDValue Op, SDValue Addr,
+                                       SDValue &Base, SDValue &Offset) {
+  if (Addr.getOpcode() == XCoreISD::CPRelativeWrapper) {
+    Base = Addr.getOperand(0);
+    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+    return true;
+  }
+  if (Addr.getOpcode() == ISD::ADD) {
+    ConstantSDNode *CN = 0;
+    if ((Addr.getOperand(0).getOpcode() == XCoreISD::CPRelativeWrapper)
+      && (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+      && (CN->getSExtValue() % 4 == 0)) {
+      // Constant word offset from a object in the data region
+      Base = Addr.getOperand(0).getOperand(0);
+      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
+      return true;
+    }
+  }
+  return false;
+}
+
+/// InstructionSelect - This callback is invoked by
+/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
+void XCoreDAGToDAGISel::
+InstructionSelect() {
+  DEBUG(BB->dump());
+
+  // Select target instructions for the DAG.
+  SelectRoot(*CurDAG);
+
+  CurDAG->RemoveDeadNodes();
+}
+
+SDNode *XCoreDAGToDAGISel::Select(SDValue Op) {
+  SDNode *N = Op.getNode();
+  MVT NVT = N->getValueType(0);
+  if (NVT == MVT::i32) {
+    switch (N->getOpcode()) {
+    default: break;
+    case ISD::Constant: {
+      if (Predicate_immMskBitp(N)) {
+        SDValue MskSize = Transform_msksize_xform(N);
+        return CurDAG->getTargetNode(XCore::MKMSK_rus, MVT::i32, MskSize);
+      }
+      else if (!Predicate_immU16(N)) {
+        unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
+        SDValue CPIdx =
+          CurDAG->getTargetConstantPool(ConstantInt::get(Type::Int32Ty, Val),
+                                        TLI.getPointerTy());
+        return CurDAG->getTargetNode(XCore::LDWCP_lru6, MVT::i32, MVT::Other,
+                                     CPIdx, CurDAG->getEntryNode());
+      }
+      break;
+    }
+    case ISD::SMUL_LOHI: {
+      // FIXME fold addition into the macc instruction
+      if (!Subtarget.isXS1A()) {
+        SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, MVT::i32,
+                                  CurDAG->getTargetConstant(0, MVT::i32)), 0);
+        SDValue Ops[] = { Zero, Zero, Op.getOperand(0), Op.getOperand(1) };
+        SDNode *ResNode = CurDAG->getTargetNode(XCore::MACCS_l4r, MVT::i32,
+                                                MVT::i32, Ops, 4);
+        ReplaceUses(SDValue(N, 0), SDValue(ResNode, 1));
+        ReplaceUses(SDValue(N, 1), SDValue(ResNode, 0));
+        return NULL;
+      }
+      break;
+    }
+    case ISD::UMUL_LOHI: {
+      // FIXME fold addition into the macc / lmul instruction
+      SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, MVT::i32,
+                                CurDAG->getTargetConstant(0, MVT::i32)), 0);
+      SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
+                        Zero, Zero };
+      SDNode *ResNode = CurDAG->getTargetNode(XCore::LMUL_l6r, MVT::i32,
+                                              MVT::i32, Ops, 4);
+      ReplaceUses(SDValue(N, 0), SDValue(ResNode, 1));
+      ReplaceUses(SDValue(N, 1), SDValue(ResNode, 0));
+      return NULL;
+    }
+    case XCoreISD::LADD: {
+      if (!Subtarget.isXS1A()) {
+        SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
+                          Op.getOperand(2) };
+        return CurDAG->getTargetNode(XCore::LADD_l5r, MVT::i32, MVT::i32,
+                                     Ops, 3);
+      }
+      break;
+    }
+    case XCoreISD::LSUB: {
+      if (!Subtarget.isXS1A()) {
+        SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
+                          Op.getOperand(2) };
+        return CurDAG->getTargetNode(XCore::LSUB_l5r, MVT::i32, MVT::i32,
+                                     Ops, 3);
+      }
+      break;
+    }
+    // Other cases are autogenerated.
+    }
+  }
+  return SelectCode(Op);
+}
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
new file mode 100644
index 000000000000..96b1ae7c5d38
--- /dev/null
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -0,0 +1,919 @@
+//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the XCoreTargetLowering class.
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "xcore-lower" + +#include "XCoreISelLowering.h" +#include "XCoreMachineFunctionInfo.h" +#include "XCore.h" +#include "XCoreTargetMachine.h" +#include "XCoreSubtarget.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/Intrinsics.h" +#include "llvm/CallingConv.h" +#include "llvm/GlobalVariable.h" +#include "llvm/GlobalAlias.h" +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/Support/Debug.h" +#include "llvm/ADT/VectorExtras.h" +#include +#include +using namespace llvm; + +const char *XCoreTargetLowering:: +getTargetNodeName(unsigned Opcode) const +{ + switch (Opcode) + { + case XCoreISD::BL : return "XCoreISD::BL"; + case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper"; + case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper"; + case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper"; + case XCoreISD::STWSP : return "XCoreISD::STWSP"; + case XCoreISD::RETSP : return "XCoreISD::RETSP"; + default : return NULL; + } +} + +XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM) + : TargetLowering(XTM), + TM(XTM), + Subtarget(*XTM.getSubtargetImpl()) { + + // Set up the register classes. + addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass); + + // Compute derived properties from the register classes + computeRegisterProperties(); + + // Division is expensive + setIntDivIsCheap(false); + + setShiftAmountType(MVT::i32); + // shl X, 32 == 0 + setShiftAmountFlavor(Extend); + setStackPointerRegisterToSaveRestore(XCore::SP); + + setSchedulingPreference(SchedulingForRegPressure); + + // Use i32 for setcc operations results (slt, sgt, ...). + setSetCCResultContents(ZeroOrOneSetCCResult); + + // XCore does not have the NodeTypes below. 
+ setOperationAction(ISD::BR_CC, MVT::Other, Expand); + setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); + setOperationAction(ISD::ADDC, MVT::i32, Expand); + setOperationAction(ISD::ADDE, MVT::i32, Expand); + setOperationAction(ISD::SUBC, MVT::i32, Expand); + setOperationAction(ISD::SUBE, MVT::i32, Expand); + + // Stop the combiner recombining select and set_cc + setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + + // 64bit + setOperationAction(ISD::ADD, MVT::i64, Custom); + setOperationAction(ISD::SUB, MVT::i64, Custom); + + if (Subtarget.isXS1A()) { + setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); + } + setOperationAction(ISD::MULHS, MVT::i32, Expand); + setOperationAction(ISD::MULHU, MVT::i32, Expand); + setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); + setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); + setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); + + // Bit Manipulation + setOperationAction(ISD::CTPOP, MVT::i32, Expand); + setOperationAction(ISD::ROTL , MVT::i32, Expand); + setOperationAction(ISD::ROTR , MVT::i32, Expand); + + // Expand jump tables for now + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + setOperationAction(ISD::JumpTable, MVT::i32, Custom); + + // RET must be custom lowered, to meet ABI requirements + setOperationAction(ISD::RET, MVT::Other, Custom); + + setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); + + // Thread Local Storage + setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); + + // Conversion of i64 -> double produces constantpool nodes + setOperationAction(ISD::ConstantPool, MVT::i32, Custom); + + // Loads + setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote); + setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote); + setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); + + setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand); + setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand); + + // Varargs + setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction(ISD::VACOPY, MVT::Other, Expand); + setOperationAction(ISD::VAARG, MVT::Other, Custom); + setOperationAction(ISD::VASTART, MVT::Other, Custom); + + // Dynamic stack + setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); + setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); + + // Debug + setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand); + setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); +} + +SDValue XCoreTargetLowering:: +LowerOperation(SDValue Op, SelectionDAG &DAG) { + switch (Op.getOpcode()) + { + case ISD::CALL: return LowerCALL(Op, DAG); + case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); + case ISD::RET: return LowerRET(Op, DAG); + case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); + case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); + case ISD::ConstantPool: return LowerConstantPool(Op, DAG); + case ISD::JumpTable: return LowerJumpTable(Op, DAG); + case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); + case ISD::VAARG: return LowerVAARG(Op, DAG); + case ISD::VASTART: return LowerVASTART(Op, DAG); + // FIXME: Remove these when LegalizeDAGTypes lands. 
+ case ISD::ADD: + case ISD::SUB: return SDValue(ExpandADDSUB(Op.getNode(), DAG),0); + + case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); + default: + assert(0 && "unimplemented operand"); + return SDValue(); + } +} + +SDNode *XCoreTargetLowering:: +ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { + switch (N->getOpcode()) { + case ISD::SUB: + case ISD::ADD: + return ExpandADDSUB(N, DAG); + default: + assert(0 && "Wasn't expecting to be able to lower this!"); + return NULL; + } +} + +//===----------------------------------------------------------------------===// +// Misc Lower Operation implementation +//===----------------------------------------------------------------------===// + +SDValue XCoreTargetLowering:: +LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) +{ + SDValue Cond = DAG.getNode(ISD::SETCC, MVT::i32, Op.getOperand(2), + Op.getOperand(3), Op.getOperand(4)); + return DAG.getNode(ISD::SELECT, MVT::i32, Cond, Op.getOperand(0), + Op.getOperand(1)); +} + +SDValue XCoreTargetLowering:: +getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SelectionDAG &DAG) +{ + if (isa(GV)) { + return DAG.getNode(XCoreISD::PCRelativeWrapper, MVT::i32, GA); + } else if (!Subtarget.isXS1A()) { + const GlobalVariable *GVar = dyn_cast(GV); + if (!GVar) { + // If GV is an alias then use the aliasee to determine constness + if (const GlobalAlias *GA = dyn_cast(GV)) + GVar = dyn_cast_or_null(GA->resolveAliasedGlobal()); + } + bool isConst = GVar && GVar->isConstant(); + if (isConst) { + return DAG.getNode(XCoreISD::CPRelativeWrapper, MVT::i32, GA); + } + } + return DAG.getNode(XCoreISD::DPRelativeWrapper, MVT::i32, GA); +} + +SDValue XCoreTargetLowering:: +LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) +{ + GlobalValue *GV = cast(Op)->getGlobal(); + SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32); + // If it's a debug information descriptor, don't mess with it. + if (DAG.isVerifiedDebugInfoDesc(Op)) + return GA; + return getGlobalAddressWrapper(GA, GV, DAG); +} + +static inline SDValue BuildGetId(SelectionDAG &DAG) { + // TODO + assert(0 && "Unimplemented"); + return SDValue(); +} + +static inline bool isZeroLengthArray(const Type *Ty) { + const ArrayType *AT = dyn_cast_or_null(Ty); + return AT && (AT->getNumElements() == 0); +} + +SDValue XCoreTargetLowering:: +LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) +{ + // transform to label + getid() * size + GlobalValue *GV = cast(Op)->getGlobal(); + SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32); + const GlobalVariable *GVar = dyn_cast(GV); + if (!GVar) { + // If GV is an alias then use the aliasee to determine size + if (const GlobalAlias *GA = dyn_cast(GV)) + GVar = dyn_cast_or_null(GA->resolveAliasedGlobal()); + } + if (! 
GVar) { + assert(0 && "Thread local object not a GlobalVariable?"); + return SDValue(); + } + const Type *Ty = cast(GV->getType())->getElementType(); + if (!Ty->isSized() || isZeroLengthArray(Ty)) { + cerr << "Size of thread local object " << GVar->getName() + << " is unknown\n"; + abort(); + } + SDValue base = getGlobalAddressWrapper(GA, GV, DAG); + const TargetData *TD = TM.getTargetData(); + unsigned Size = TD->getABITypeSize(Ty); + SDValue offset = DAG.getNode(ISD::MUL, MVT::i32, BuildGetId(DAG), + DAG.getConstant(Size, MVT::i32)); + return DAG.getNode(ISD::ADD, MVT::i32, base, offset); +} + +SDValue XCoreTargetLowering:: +LowerConstantPool(SDValue Op, SelectionDAG &DAG) +{ + ConstantPoolSDNode *CP = cast(Op); + if (Subtarget.isXS1A()) { + assert(0 && "Lowering of constant pool unimplemented"); + return SDValue(); + } else { + MVT PtrVT = Op.getValueType(); + SDValue Res; + if (CP->isMachineConstantPoolEntry()) { + Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, + CP->getAlignment()); + } else { + Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, + CP->getAlignment()); + } + return DAG.getNode(XCoreISD::CPRelativeWrapper, MVT::i32, Res); + } +} + +SDValue XCoreTargetLowering:: +LowerJumpTable(SDValue Op, SelectionDAG &DAG) +{ + MVT PtrVT = Op.getValueType(); + JumpTableSDNode *JT = cast(Op); + SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); + return DAG.getNode(XCoreISD::DPRelativeWrapper, MVT::i32, JTI); +} + +SDNode *XCoreTargetLowering:: +ExpandADDSUB(SDNode *N, SelectionDAG &DAG) +{ + assert(N->getValueType(0) == MVT::i64 && + (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) && + "Unknown operand to lower!"); + + // Extract components + SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0), + DAG.getConstant(0, MVT::i32)); + SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0), + DAG.getConstant(1, MVT::i32)); + SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(1), + DAG.getConstant(0, MVT::i32)); + SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(1), + DAG.getConstant(1, MVT::i32)); + + // Expand + if (Subtarget.isXS1A()) { + SDValue Lo = DAG.getNode(N->getOpcode(), MVT::i32, LHSL, RHSL); + + ISD::CondCode CarryCC = (N->getOpcode() == ISD::ADD) ? ISD::SETULT : + ISD::SETUGT; + SDValue Carry = DAG.getSetCC(MVT::i32, Lo, LHSL, CarryCC); + + SDValue Hi = DAG.getNode(N->getOpcode(), MVT::i32, LHSH, Carry); + Hi = DAG.getNode(N->getOpcode(), MVT::i32, Hi, RHSH); + // Merge the pieces + return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi).getNode(); + } + unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD : + XCoreISD::LSUB; + SDValue Zero = DAG.getConstant(0, MVT::i32); + SDValue Carry = DAG.getNode(Opcode, DAG.getVTList(MVT::i32, MVT::i32), + LHSL, RHSL, Zero); + SDValue Lo(Carry.getNode(), 1); + + SDValue Ignored = DAG.getNode(Opcode, DAG.getVTList(MVT::i32, MVT::i32), + LHSH, RHSH, Carry); + SDValue Hi(Ignored.getNode(), 1); + // Merge the pieces + return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi).getNode(); +} + +SDValue XCoreTargetLowering:: +LowerVAARG(SDValue Op, SelectionDAG &DAG) +{ + assert(0 && "unimplemented"); + // FIX Arguments passed by reference need a extra dereference. 
+ SDNode *Node = Op.getNode(); + const Value *V = cast(Node->getOperand(2))->getValue(); + MVT VT = Node->getValueType(0); + SDValue VAList = DAG.getLoad(getPointerTy(), Node->getOperand(0), + Node->getOperand(1), V, 0); + // Increment the pointer, VAList, to the next vararg + SDValue Tmp3 = DAG.getNode(ISD::ADD, getPointerTy(), VAList, + DAG.getConstant(VT.getSizeInBits(), + getPointerTy())); + // Store the incremented VAList to the legalized pointer + Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Node->getOperand(1), V, 0); + // Load the actual argument out of the pointer VAList + return DAG.getLoad(VT, Tmp3, VAList, NULL, 0); +} + +SDValue XCoreTargetLowering:: +LowerVASTART(SDValue Op, SelectionDAG &DAG) +{ + // vastart stores the address of the VarArgsFrameIndex slot into the + // memory location argument + MachineFunction &MF = DAG.getMachineFunction(); + XCoreFunctionInfo *XFI = MF.getInfo(); + SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32); + const Value *SV = cast(Op.getOperand(2))->getValue(); + return DAG.getStore(Op.getOperand(0), Addr, Op.getOperand(1), SV, 0); +} + +SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { + // Depths > 0 not supported yet! + if (cast(Op.getOperand(0))->getZExtValue() > 0) + return SDValue(); + + MachineFunction &MF = DAG.getMachineFunction(); + const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo(); + return DAG.getCopyFromReg(DAG.getEntryNode(), RegInfo->getFrameRegister(MF), + MVT::i32); +} + +//===----------------------------------------------------------------------===// +// Calling Convention Implementation +// +// The lower operations present on calling convention works on this order: +// LowerCALL (virt regs --> phys regs, virt regs --> stack) +// LowerFORMAL_ARGUMENTS (phys --> virt regs, stack --> virt regs) +// LowerRET (virt regs --> phys regs) +// LowerCALL (phys regs --> virt regs) +// +//===----------------------------------------------------------------------===// + +#include "XCoreGenCallingConv.inc" + +//===----------------------------------------------------------------------===// +// CALL Calling Convention Implementation +//===----------------------------------------------------------------------===// + +/// XCore custom CALL implementation +SDValue XCoreTargetLowering:: +LowerCALL(SDValue Op, SelectionDAG &DAG) +{ + CallSDNode *TheCall = cast(Op.getNode()); + unsigned CallingConv = TheCall->getCallingConv(); + // For now, only CallingConv::C implemented + switch (CallingConv) + { + default: + assert(0 && "Unsupported calling convention"); + case CallingConv::Fast: + case CallingConv::C: + return LowerCCCCallTo(Op, DAG, CallingConv); + } +} + +/// LowerCCCCallTo - functions arguments are copied from virtual +/// regs to (physical regs)/(stack frame), CALLSEQ_START and +/// CALLSEQ_END are emitted. +/// TODO: isTailCall, sret. +SDValue XCoreTargetLowering:: +LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC) +{ + CallSDNode *TheCall = cast(Op.getNode()); + SDValue Chain = TheCall->getChain(); + SDValue Callee = TheCall->getCallee(); + bool isVarArg = TheCall->isVarArg(); + + // Analyze operands of the call, assigning locations to each operand. + SmallVector ArgLocs; + CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); + + // The ABI dictates there should be one stack slot available to the callee + // on function entry (for saving lr). 
+ CCInfo.AllocateStack(4, 4); + + CCInfo.AnalyzeCallOperands(TheCall, CC_XCore); + + // Get a count of how many bytes are to be pushed on the stack. + unsigned NumBytes = CCInfo.getNextStackOffset(); + + Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, + getPointerTy(), true)); + + SmallVector, 4> RegsToPass; + SmallVector MemOpChains; + + // Walk the register/memloc assignments, inserting copies/loads. + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + + // Arguments start after the 5 first operands of ISD::CALL + SDValue Arg = TheCall->getArg(i); + + // Promote the value if needed. + switch (VA.getLocInfo()) { + default: assert(0 && "Unknown loc info!"); + case CCValAssign::Full: break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); + break; + case CCValAssign::AExt: + Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); + break; + } + + // Arguments that can be passed on register must be kept at + // RegsToPass vector + if (VA.isRegLoc()) { + RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); + } else { + assert(VA.isMemLoc()); + + int Offset = VA.getLocMemOffset(); + + MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, MVT::Other, Chain, Arg, + DAG.getConstant(Offset/4, MVT::i32))); + } + } + + // Transform all store nodes into one single node because + // all store nodes are independent of each other. + if (!MemOpChains.empty()) + Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, + &MemOpChains[0], MemOpChains.size()); + + // Build a sequence of copy-to-reg nodes chained together with token + // chain and flag operands which copy the outgoing args into registers. + // The InFlag in necessary since all emited instructions must be + // stuck together. + SDValue InFlag; + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { + Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, + RegsToPass[i].second, InFlag); + InFlag = Chain.getValue(1); + } + + // If the callee is a GlobalAddress node (quite common, every direct call is) + // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. + // Likewise ExternalSymbol -> TargetExternalSymbol. + if (GlobalAddressSDNode *G = dyn_cast(Callee)) + Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32); + else if (ExternalSymbolSDNode *E = dyn_cast(Callee)) + Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32); + + // XCoreBranchLink = #chain, #target_address, #opt_in_flags... + // = Chain, Callee, Reg#1, Reg#2, ... + // + // Returns a chain & a flag for retval copy to use. + SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); + SmallVector Ops; + Ops.push_back(Chain); + Ops.push_back(Callee); + + // Add argument registers to the end of the list so that they are + // known live into the call. + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) + Ops.push_back(DAG.getRegister(RegsToPass[i].first, + RegsToPass[i].second.getValueType())); + + if (InFlag.getNode()) + Ops.push_back(InFlag); + + Chain = DAG.getNode(XCoreISD::BL, NodeTys, &Ops[0], Ops.size()); + InFlag = Chain.getValue(1); + + // Create the CALLSEQ_END node. + Chain = DAG.getCALLSEQ_END(Chain, + DAG.getConstant(NumBytes, getPointerTy(), true), + DAG.getConstant(0, getPointerTy(), true), + InFlag); + InFlag = Chain.getValue(1); + + // Handle result values, copying them out of physregs into vregs that we + // return. 
+ return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG), + Op.getResNo()); +} + +/// LowerCallResult - Lower the result values of an ISD::CALL into the +/// appropriate copies out of appropriate physical registers. This assumes that +/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call +/// being lowered. Returns a SDNode with the same number of values as the +/// ISD::CALL. +SDNode *XCoreTargetLowering:: +LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, + unsigned CallingConv, SelectionDAG &DAG) { + bool isVarArg = TheCall->isVarArg(); + + // Assign locations to each value returned by this call. + SmallVector RVLocs; + CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs); + + CCInfo.AnalyzeCallResult(TheCall, RetCC_XCore); + SmallVector ResultVals; + + // Copy all of the result registers out of their specified physreg. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(), + RVLocs[i].getValVT(), InFlag).getValue(1); + InFlag = Chain.getValue(2); + ResultVals.push_back(Chain.getValue(0)); + } + + ResultVals.push_back(Chain); + + // Merge everything together with a MERGE_VALUES node. + return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(), + &ResultVals[0], ResultVals.size()).getNode(); +} + +//===----------------------------------------------------------------------===// +// FORMAL_ARGUMENTS Calling Convention Implementation +//===----------------------------------------------------------------------===// + +/// XCore custom FORMAL_ARGUMENTS implementation +SDValue XCoreTargetLowering:: +LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) +{ + unsigned CC = cast(Op.getOperand(1))->getZExtValue(); + switch(CC) + { + default: + assert(0 && "Unsupported calling convention"); + case CallingConv::C: + case CallingConv::Fast: + return LowerCCCArguments(Op, DAG); + } +} + +/// LowerCCCArguments - transform physical registers into +/// virtual registers and generate load operations for +/// arguments places on the stack. +/// TODO: sret +SDValue XCoreTargetLowering:: +LowerCCCArguments(SDValue Op, SelectionDAG &DAG) +{ + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + MachineRegisterInfo &RegInfo = MF.getRegInfo(); + SDValue Root = Op.getOperand(0); + bool isVarArg = cast(Op.getOperand(2))->getZExtValue() != 0; + unsigned CC = MF.getFunction()->getCallingConv(); + + // Assign locations to all of the incoming arguments. 
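+  // The same CC_XCore table used on the caller side in LowerCCCCallTo drives
+  // the assignment here, so arguments arrive in the registers the caller
+  // copied them into; stack arguments are found at LRSaveSize plus their
+  // allocated offset.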
+ SmallVector ArgLocs; + CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); + + CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_XCore); + + unsigned StackSlotSize = XCoreFrameInfo::stackSlotSize(); + + SmallVector ArgValues; + + unsigned LRSaveSize = StackSlotSize; + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + + CCValAssign &VA = ArgLocs[i]; + + if (VA.isRegLoc()) { + // Arguments passed in registers + MVT RegVT = VA.getLocVT(); + switch (RegVT.getSimpleVT()) { + default: + cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: " + << RegVT.getSimpleVT() + << "\n"; + abort(); + case MVT::i32: + unsigned VReg = RegInfo.createVirtualRegister( + XCore::GRRegsRegisterClass); + RegInfo.addLiveIn(VA.getLocReg(), VReg); + ArgValues.push_back(DAG.getCopyFromReg(Root, VReg, RegVT)); + } + } else { + // sanity check + assert(VA.isMemLoc()); + // Load the argument to a virtual register + unsigned ObjSize = VA.getLocVT().getSizeInBits()/8; + if (ObjSize > StackSlotSize) { + cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: " + << VA.getLocVT().getSimpleVT() + << "\n"; + } + // Create the frame index object for this incoming parameter... + int FI = MFI->CreateFixedObject(ObjSize, + LRSaveSize + VA.getLocMemOffset()); + + // Create the SelectionDAG nodes corresponding to a load + //from this parameter + SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); + ArgValues.push_back(DAG.getLoad(VA.getLocVT(), Root, FIN, NULL, 0)); + } + } + + if (isVarArg) { + /* Argument registers */ + static const unsigned ArgRegs[] = { + XCore::R0, XCore::R1, XCore::R2, XCore::R3 + }; + XCoreFunctionInfo *XFI = MF.getInfo(); + unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs, + array_lengthof(ArgRegs)); + if (FirstVAReg < array_lengthof(ArgRegs)) { + SmallVector MemOps; + int offset = 0; + // Save remaining registers, storing higher register numbers at a higher + // address + for (unsigned i = array_lengthof(ArgRegs) - 1; i >= FirstVAReg; --i) { + // Create a stack slot + int FI = MFI->CreateFixedObject(4, offset); + if (i == FirstVAReg) { + XFI->setVarArgsFrameIndex(FI); + } + offset -= StackSlotSize; + SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); + // Move argument from phys reg -> virt reg + unsigned VReg = RegInfo.createVirtualRegister( + XCore::GRRegsRegisterClass); + RegInfo.addLiveIn(ArgRegs[i], VReg); + SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::i32); + // Move argument from virt reg -> stack + SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); + MemOps.push_back(Store); + } + if (!MemOps.empty()) + Root = DAG.getNode(ISD::TokenFactor, MVT::Other, + &MemOps[0], MemOps.size()); + } else { + // This will point to the next argument passed via stack. + XFI->setVarArgsFrameIndex( + MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset())); + } + } + + ArgValues.push_back(Root); + + // Return the new list of results. 
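+  // The merged node carries one value per formal argument followed by the
+  // chain (Root), matching the value list of the FORMAL_ARGUMENTS node.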
+ std::vector RetVT(Op.getNode()->value_begin(), + Op.getNode()->value_end()); + return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); +} + +//===----------------------------------------------------------------------===// +// Return Value Calling Convention Implementation +//===----------------------------------------------------------------------===// + +SDValue XCoreTargetLowering:: +LowerRET(SDValue Op, SelectionDAG &DAG) +{ + // CCValAssign - represent the assignment of + // the return value to a location + SmallVector RVLocs; + unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); + bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); + + // CCState - Info about the registers and stack slot. + CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs); + + // Analize return values of ISD::RET + CCInfo.AnalyzeReturn(Op.getNode(), RetCC_XCore); + + // If this is the first return lowered for this function, add + // the regs to the liveout set for the function. + if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { + for (unsigned i = 0; i != RVLocs.size(); ++i) + if (RVLocs[i].isRegLoc()) + DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); + } + + // The chain is always operand #0 + SDValue Chain = Op.getOperand(0); + SDValue Flag; + + // Copy the result values into the output registers. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + + // ISD::RET => ret chain, (regnum1,val1), ... + // So i*2+1 index only the regnums + Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1), Flag); + + // guarantee that all emitted copies are + // stuck together, avoiding something bad + Flag = Chain.getValue(1); + } + + // Return on XCore is always a "retsp 0" + if (Flag.getNode()) + return DAG.getNode(XCoreISD::RETSP, MVT::Other, + Chain, DAG.getConstant(0, MVT::i32), Flag); + else // Return Void + return DAG.getNode(XCoreISD::RETSP, MVT::Other, + Chain, DAG.getConstant(0, MVT::i32)); +} + +//===----------------------------------------------------------------------===// +// Other Lowering Code +//===----------------------------------------------------------------------===// + +MachineBasicBlock * +XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, + MachineBasicBlock *BB) { + const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo(); + assert((MI->getOpcode() == XCore::SELECT_CC) && + "Unexpected instr type to insert"); + + // To "insert" a SELECT_CC instruction, we actually have to insert the diamond + // control-flow pattern. The incoming instruction knows the destination vreg + // to set, the condition code register to branch on, the true/false values to + // select between, and a branch opcode to use. + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = BB; + ++It; + + // thisMBB: + // ... + // TrueVal = ... 
+ // cmpTY ccX, r1, r2 + // bCC copy1MBB + // fallthrough --> copy0MBB + MachineBasicBlock *thisMBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); + BuildMI(BB, TII.get(XCore::BRFT_lru6)) + .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); + F->insert(It, copy0MBB); + F->insert(It, sinkMBB); + // Update machine-CFG edges by transferring all successors of the current + // block to the new block which will contain the Phi node for the select. + sinkMBB->transferSuccessors(BB); + // Next, add the true and fallthrough blocks as its successors. + BB->addSuccessor(copy0MBB); + BB->addSuccessor(sinkMBB); + + // copy0MBB: + // %FalseValue = ... + // # fallthrough to sinkMBB + BB = copy0MBB; + + // Update machine-CFG edges + BB->addSuccessor(sinkMBB); + + // sinkMBB: + // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] + // ... + BB = sinkMBB; + BuildMI(BB, TII.get(XCore::PHI), MI->getOperand(0).getReg()) + .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) + .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); + + F->DeleteMachineInstr(MI); // The pseudo instruction is gone now. + return BB; +} + +//===----------------------------------------------------------------------===// +// Addressing mode description hooks +//===----------------------------------------------------------------------===// + +static inline bool isImmUs(int64_t val) +{ + return (val >= 0 && val <= 11); +} + +static inline bool isImmUs2(int64_t val) +{ + return (val%2 == 0 && isImmUs(val/2)); +} + +static inline bool isImmUs4(int64_t val) +{ + return (val%4 == 0 && isImmUs(val/4)); +} + +/// isLegalAddressingMode - Return true if the addressing mode represented +/// by AM is legal for this target, for a load/store of the specified type. 
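+/// XCore load/store offsets are scaled unsigned immediates in the range
+/// 0-11 (isImmUs): counted in bytes for 8-bit accesses, halfwords for 16-bit
+/// accesses and words for 32-bit accesses. Register+register addressing is
+/// only accepted when the scale matches the access size.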
+bool +XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM, + const Type *Ty) const { + MVT VT = getValueType(Ty, true); + // Get expected value type after legalization + switch (VT.getSimpleVT()) { + // Legal load / stores + case MVT::i8: + case MVT::i16: + case MVT::i32: + break; + // Expand i1 -> i8 + case MVT::i1: + VT = MVT::i8; + break; + // Everything else is lowered to words + default: + VT = MVT::i32; + break; + } + if (AM.BaseGV) { + return VT == MVT::i32 && !AM.HasBaseReg && AM.Scale == 0 && + AM.BaseOffs%4 == 0; + } + + switch (VT.getSimpleVT()) { + default: + return false; + case MVT::i8: + // reg + imm + if (AM.Scale == 0) { + return isImmUs(AM.BaseOffs); + } + return AM.Scale == 1 && AM.BaseOffs == 0; + case MVT::i16: + // reg + imm + if (AM.Scale == 0) { + return isImmUs2(AM.BaseOffs); + } + return AM.Scale == 2 && AM.BaseOffs == 0; + case MVT::i32: + // reg + imm + if (AM.Scale == 0) { + return isImmUs4(AM.BaseOffs); + } + // reg + reg<<2 + return AM.Scale == 4 && AM.BaseOffs == 0; + } + + return false; +} + +//===----------------------------------------------------------------------===// +// XCore Inline Assembly Support +//===----------------------------------------------------------------------===// + +std::vector XCoreTargetLowering:: +getRegClassForInlineAsmConstraint(const std::string &Constraint, + MVT VT) const +{ + if (Constraint.size() != 1) + return std::vector(); + + switch (Constraint[0]) { + default : break; + case 'r': + return make_vector(XCore::R0, XCore::R1, XCore::R2, + XCore::R3, XCore::R4, XCore::R5, + XCore::R6, XCore::R7, XCore::R8, + XCore::R9, XCore::R10, XCore::R11, 0); + break; + } + return std::vector(); +} diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.h b/llvm/lib/Target/XCore/XCoreISelLowering.h new file mode 100644 index 000000000000..d2622c1c7c0f --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreISelLowering.h @@ -0,0 +1,119 @@ +//===-- XCoreISelLowering.h - XCore DAG Lowering Interface ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that XCore uses to lower LLVM code into a +// selection DAG. +// +//===----------------------------------------------------------------------===// + +#ifndef XCOREISELLOWERING_H +#define XCOREISELLOWERING_H + +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/Target/TargetLowering.h" +#include "XCore.h" + +namespace llvm { + + // Forward delcarations + class XCoreSubtarget; + class XCoreTargetMachine; + + namespace XCoreISD { + enum NodeType { + // Start the numbering where the builtin ops and target ops leave off. 
+ FIRST_NUMBER = ISD::BUILTIN_OP_END+XCore::INSTRUCTION_LIST_END, + + // Branch and link (call) + BL, + + // pc relative address + PCRelativeWrapper, + + // dp relative address + DPRelativeWrapper, + + // cp relative address + CPRelativeWrapper, + + // Store word to stack + STWSP, + + // Corresponds to retsp instruction + RETSP, + + // Corresponds to LADD instruction + LADD, + + // Corresponds to LSUB instruction + LSUB + }; + } + + //===--------------------------------------------------------------------===// + // TargetLowering Implementation + //===--------------------------------------------------------------------===// + class XCoreTargetLowering : public TargetLowering + { + public: + + explicit XCoreTargetLowering(XCoreTargetMachine &TM); + + /// LowerOperation - Provide custom lowering hooks for some operations. + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); + + virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG); + + /// getTargetNodeName - This method returns the name of a target specific + // DAG node. + virtual const char *getTargetNodeName(unsigned Opcode) const; + + virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, + MachineBasicBlock *MBB); + + virtual bool isLegalAddressingMode(const AddrMode &AM, + const Type *Ty) const; + + private: + const XCoreTargetMachine &TM; + const XCoreSubtarget &Subtarget; + + // Lower Operand helpers + SDValue LowerCCCArguments(SDValue Op, SelectionDAG &DAG); + SDValue LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC); + SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode*TheCall, + unsigned CallingConv, SelectionDAG &DAG); + SDValue getReturnAddressFrameIndex(SelectionDAG &DAG); + SDValue getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, + SelectionDAG &DAG); + + // Lower Operand specifics + SDValue LowerRET(SDValue Op, SelectionDAG &DAG); + SDValue LowerCALL(SDValue Op, SelectionDAG &DAG); + SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG); + SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); + SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG); + SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG); + SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG); + SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG); + + // Inline asm support + std::vector + getRegClassForInlineAsmConstraint(const std::string &Constraint, + MVT VT) const; + + // Expand specifics + SDNode *ExpandADDSUB(SDNode *Op, SelectionDAG &DAG); + }; +} + +#endif // XCOREISELLOWERING_H diff --git a/llvm/lib/Target/XCore/XCoreInstrFormats.td b/llvm/lib/Target/XCore/XCoreInstrFormats.td new file mode 100644 index 000000000000..8002c993270c --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreInstrFormats.td @@ -0,0 +1,120 @@ +//===- XCoreInstrFormats.td - XCore Instruction Formats ----*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Instruction format superclass +//===----------------------------------------------------------------------===// +class InstXCore pattern> + : Instruction { + field bits<32> Inst; + + let Namespace = "XCore"; + dag OutOperandList = outs; + dag InOperandList = ins; + let AsmString = asmstr; + let Pattern = pattern; +} + +// XCore pseudo instructions format +class PseudoInstXCore pattern> + : InstXCore; + +//===----------------------------------------------------------------------===// +// Instruction formats +//===----------------------------------------------------------------------===// + +class _F3R pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FL3R pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _F2RUS pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FL2RUS pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FRU6 pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FLRU6 pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FU6 pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FLU6 pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FU10 pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FLU10 pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _F2R pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FRUS pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _FL2R pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _F1R pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _F0R pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _L4R pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _L5R pattern> + : InstXCore { + let Inst{31-0} = 0; +} + +class _L6R pattern> + : InstXCore { + let Inst{31-0} = 0; +} diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.cpp b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp new file mode 100644 index 000000000000..4d0f42a02082 --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp @@ -0,0 +1,506 @@ +//===- XCoreInstrInfo.cpp - XCore Instruction Information -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the XCore implementation of the TargetInstrInfo class. +// +//===----------------------------------------------------------------------===// + +#include "XCoreMachineFunctionInfo.h" +#include "XCoreInstrInfo.h" +#include "XCore.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineLocation.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "XCoreGenInstrInfo.inc" +#include "llvm/Support/Debug.h" + +namespace llvm { +namespace XCore { + + // XCore Condition Codes + enum CondCode { + COND_TRUE, + COND_FALSE, + COND_INVALID + }; +} +} + +using namespace llvm; + +XCoreInstrInfo::XCoreInstrInfo(void) + : TargetInstrInfoImpl(XCoreInsts, array_lengthof(XCoreInsts)), + RI(*this) { +} + +static bool isZeroImm(const MachineOperand &op) { + return op.isImm() && op.getImm() == 0; +} + +/// Return true if the instruction is a register to register move and +/// leave the source and dest operands in the passed parameters. 
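+/// On XCore a copy appears as an add/sub of zero or an or/and of a register
+/// with itself; those are the patterns recognized here.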
+/// +bool XCoreInstrInfo::isMoveInstr(const MachineInstr &MI, + unsigned &SrcReg, unsigned &DstReg) const { + // We look for 4 kinds of patterns here: + // add dst, src, 0 + // sub dst, src, 0 + // or dst, src, src + // and dst, src, src + if ((MI.getOpcode() == XCore::ADD_2rus || MI.getOpcode() == XCore::SUB_2rus) + && isZeroImm(MI.getOperand(2))) { + DstReg = MI.getOperand(0).getReg(); + SrcReg = MI.getOperand(1).getReg(); + return true; + } else if ((MI.getOpcode() == XCore::OR_3r || MI.getOpcode() == XCore::AND_3r) + && MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) { + DstReg = MI.getOperand(0).getReg(); + SrcReg = MI.getOperand(1).getReg(); + return true; + } + return false; +} + +/// isLoadFromStackSlot - If the specified machine instruction is a direct +/// load from a stack slot, return the virtual or physical register number of +/// the destination along with the FrameIndex of the loaded stack slot. If +/// not, return 0. This predicate must return 0 if the instruction has +/// any side effects other than loading from the stack slot. +unsigned +XCoreInstrInfo::isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const{ + int Opcode = MI->getOpcode(); + if (Opcode == XCore::LDWSP_ru6 || Opcode == XCore::LDWSP_lru6) + { + if ((MI->getOperand(1).isFI()) && // is a stack slot + (MI->getOperand(2).isImm()) && // the imm is zero + (isZeroImm(MI->getOperand(2)))) + { + FrameIndex = MI->getOperand(1).getIndex(); + return MI->getOperand(0).getReg(); + } + } + return 0; +} + + /// isStoreToStackSlot - If the specified machine instruction is a direct + /// store to a stack slot, return the virtual or physical register number of + /// the source reg along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than storing to the stack slot. +unsigned +XCoreInstrInfo::isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const { + int Opcode = MI->getOpcode(); + if (Opcode == XCore::STWSP_ru6 || Opcode == XCore::STWSP_lru6) + { + if ((MI->getOperand(1).isFI()) && // is a stack slot + (MI->getOperand(2).isImm()) && // the imm is zero + (isZeroImm(MI->getOperand(2)))) + { + FrameIndex = MI->getOperand(1).getIndex(); + return MI->getOperand(0).getReg(); + } + } + else if (Opcode == XCore::STWSP_ru6_2 || Opcode == XCore::STWSP_lru6_2) + { + if (MI->getOperand(1).isFI()) + { + FrameIndex = MI->getOperand(1).getIndex(); + return MI->getOperand(0).getReg(); + } + } + return 0; +} + +/// isInvariantLoad - Return true if the specified instruction (which is marked +/// mayLoad) is loading from a location whose value is invariant across the +/// function. For example, loading a value from the constant pool or from +/// from the argument area of a function if it does not change. This should +/// only return true of *all* loads the instruction does are invariant (if it +/// does multiple loads). 
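+/// For XCore this covers constant-pool loads (LDWCP) and loads from fixed,
+/// immutable stack objects such as incoming argument slots.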
+bool +XCoreInstrInfo::isInvariantLoad(MachineInstr *MI) const { + // Loads from constants pools and loads from invariant argument slots are + // invariant + int Opcode = MI->getOpcode(); + if (Opcode == XCore::LDWCP_ru6 || Opcode == XCore::LDWCP_lru6) { + return MI->getOperand(1).isCPI(); + } + int FrameIndex; + if (isLoadFromStackSlot(MI, FrameIndex)) { + const MachineFrameInfo &MFI = + *MI->getParent()->getParent()->getFrameInfo(); + return MFI.isFixedObjectIndex(FrameIndex) && + MFI.isImmutableObjectIndex(FrameIndex); + } + return false; +} + +//===----------------------------------------------------------------------===// +// Branch Analysis +//===----------------------------------------------------------------------===// + +static inline bool IsBRU(unsigned BrOpc) { + return BrOpc == XCore::BRFU_u6 + || BrOpc == XCore::BRFU_lu6 + || BrOpc == XCore::BRBU_u6 + || BrOpc == XCore::BRBU_lu6; +} + +static inline bool IsBRT(unsigned BrOpc) { + return BrOpc == XCore::BRFT_ru6 + || BrOpc == XCore::BRFT_lru6 + || BrOpc == XCore::BRBT_ru6 + || BrOpc == XCore::BRBT_lru6; +} + +static inline bool IsBRF(unsigned BrOpc) { + return BrOpc == XCore::BRFF_ru6 + || BrOpc == XCore::BRFF_lru6 + || BrOpc == XCore::BRBF_ru6 + || BrOpc == XCore::BRBF_lru6; +} + +static inline bool IsCondBranch(unsigned BrOpc) { + return IsBRF(BrOpc) || IsBRT(BrOpc); +} + +/// GetCondFromBranchOpc - Return the XCore CC that matches +/// the correspondent Branch instruction opcode. +static XCore::CondCode GetCondFromBranchOpc(unsigned BrOpc) +{ + if (IsBRT(BrOpc)) { + return XCore::COND_TRUE; + } else if (IsBRF(BrOpc)) { + return XCore::COND_FALSE; + } else { + return XCore::COND_INVALID; + } +} + +/// GetCondBranchFromCond - Return the Branch instruction +/// opcode that matches the cc. +static inline unsigned GetCondBranchFromCond(XCore::CondCode CC) +{ + switch (CC) { + default: assert(0 && "Illegal condition code!"); + case XCore::COND_TRUE : return XCore::BRFT_lru6; + case XCore::COND_FALSE : return XCore::BRFF_lru6; + } +} + +/// GetOppositeBranchCondition - Return the inverse of the specified +/// condition, e.g. turning COND_E to COND_NE. +static inline XCore::CondCode GetOppositeBranchCondition(XCore::CondCode CC) +{ + switch (CC) { + default: assert(0 && "Illegal condition code!"); + case XCore::COND_TRUE : return XCore::COND_FALSE; + case XCore::COND_FALSE : return XCore::COND_TRUE; + } +} + +/// AnalyzeBranch - Analyze the branching code at the end of MBB, returning +/// true if it cannot be understood (e.g. it's a switch dispatch or isn't +/// implemented for a target). Upon success, this returns false and returns +/// with the following information in various cases: +/// +/// 1. If this block ends with no branches (it just falls through to its succ) +/// just return false, leaving TBB/FBB null. +/// 2. If this block ends with only an unconditional branch, it sets TBB to be +/// the destination block. +/// 3. If this block ends with an conditional branch and it falls through to +/// an successor block, it sets TBB to be the branch destination block and a +/// list of operands that evaluate the condition. These +/// operands can be passed to other TargetInstrInfo methods to create new +/// branches. +/// 4. If this block ends with an conditional branch and an unconditional +/// block, it returns the 'true' destination in TBB, the 'false' destination +/// in FBB, and a list of operands that evaluate the condition. These +/// operands can be passed to other TargetInstrInfo methods to create new +/// branches. 
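+///
+/// On XCore the condition is returned as two operands: an immediate holding
+/// XCore::COND_TRUE or XCore::COND_FALSE and the register tested by the
+/// bt/bf branch.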
+/// +/// Note that RemoveBranch and InsertBranch must be implemented to support +/// cases where this method returns success. +/// +bool +XCoreInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond) const { + // If the block has no terminators, it just falls into the block after it. + MachineBasicBlock::iterator I = MBB.end(); + if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) + return false; + + // Get the last instruction in the block. + MachineInstr *LastInst = I; + + // If there is only one terminator instruction, process it. + if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) { + if (IsBRU(LastInst->getOpcode())) { + TBB = LastInst->getOperand(0).getMBB(); + return false; + } + + XCore::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode()); + if (BranchCode == XCore::COND_INVALID) + return true; // Can't handle indirect branch. + + // Conditional branch + // Block ends with fall-through condbranch. + + TBB = LastInst->getOperand(1).getMBB(); + Cond.push_back(MachineOperand::CreateImm(BranchCode)); + Cond.push_back(LastInst->getOperand(0)); + return false; + } + + // Get the instruction before it if it's a terminator. + MachineInstr *SecondLastInst = I; + + // If there are three terminators, we don't know what sort of block this is. + if (SecondLastInst && I != MBB.begin() && + isUnpredicatedTerminator(--I)) + return true; + + unsigned SecondLastOpc = SecondLastInst->getOpcode(); + XCore::CondCode BranchCode = GetCondFromBranchOpc(SecondLastOpc); + + // If the block ends with conditional branch followed by unconditional, + // handle it. + if (BranchCode != XCore::COND_INVALID + && IsBRU(LastInst->getOpcode())) { + + TBB = SecondLastInst->getOperand(1).getMBB(); + Cond.push_back(MachineOperand::CreateImm(BranchCode)); + Cond.push_back(SecondLastInst->getOperand(0)); + + FBB = LastInst->getOperand(0).getMBB(); + return false; + } + + // If the block ends with two unconditional branches, handle it. The second + // one is not executed, so remove it. + if (IsBRU(SecondLastInst->getOpcode()) && + IsBRU(LastInst->getOpcode())) { + TBB = SecondLastInst->getOperand(0).getMBB(); + I = LastInst; + I->eraseFromParent(); + return false; + } + + // Otherwise, can't handle this. + return true; +} + +unsigned +XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB, + MachineBasicBlock *FBB, + const SmallVectorImpl &Cond)const{ + // Shouldn't be a fall through. + assert(TBB && "InsertBranch must not be told to insert a fallthrough"); + assert((Cond.size() == 2 || Cond.size() == 0) && + "Unexpected number of components!"); + + if (FBB == 0) { // One way branch. + if (Cond.empty()) { + // Unconditional branch + BuildMI(&MBB, get(XCore::BRFU_lu6)).addMBB(TBB); + } else { + // Conditional branch. + unsigned Opc = GetCondBranchFromCond((XCore::CondCode)Cond[0].getImm()); + BuildMI(&MBB, get(Opc)).addReg(Cond[1].getReg()) + .addMBB(TBB); + } + return 1; + } + + // Two-way Conditional branch. 
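+  // Emit the conditional branch to TBB followed by an unconditional branch
+  // to FBB, and report that two instructions were inserted.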
+ assert(Cond.size() == 2 && "Unexpected number of components!"); + unsigned Opc = GetCondBranchFromCond((XCore::CondCode)Cond[0].getImm()); + BuildMI(&MBB, get(Opc)).addReg(Cond[1].getReg()) + .addMBB(TBB); + BuildMI(&MBB, get(XCore::BRFU_lu6)).addMBB(FBB); + return 2; +} + +unsigned +XCoreInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { + MachineBasicBlock::iterator I = MBB.end(); + if (I == MBB.begin()) return 0; + --I; + if (!IsBRU(I->getOpcode()) && !IsCondBranch(I->getOpcode())) + return 0; + + // Remove the branch. + I->eraseFromParent(); + + I = MBB.end(); + + if (I == MBB.begin()) return 1; + --I; + if (!IsCondBranch(I->getOpcode())) + return 1; + + // Remove the branch. + I->eraseFromParent(); + return 2; +} + +bool XCoreInstrInfo::copyRegToReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DestReg, unsigned SrcReg, + const TargetRegisterClass *DestRC, + const TargetRegisterClass *SrcRC) const { + if (DestRC == SrcRC) { + if (DestRC == XCore::GRRegsRegisterClass) { + BuildMI(MBB, I, get(XCore::ADD_2rus), DestReg).addReg(SrcReg).addImm(0); + return true; + } else { + return false; + } + } + + if (SrcRC == XCore::RRegsRegisterClass && SrcReg == XCore::SP && + DestRC == XCore::GRRegsRegisterClass) { + BuildMI(MBB, I, get(XCore::LDAWSP_ru6), DestReg).addImm(0).addImm(0); + return true; + } + if (DestRC == XCore::RRegsRegisterClass && DestReg == XCore::SP && + SrcRC == XCore::GRRegsRegisterClass) { + BuildMI(MBB, I, get(XCore::SETSP_1r)).addReg(SrcReg); + return true; + } + return false; +} + +void XCoreInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned SrcReg, bool isKill, int FrameIndex, + const TargetRegisterClass *RC) const +{ + BuildMI(MBB, I, get(XCore::STWSP_lru6)).addReg(SrcReg, false, false, isKill) + .addFrameIndex(FrameIndex).addImm(0); +} + +void XCoreInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg, + bool isKill, SmallVectorImpl &Addr, + const TargetRegisterClass *RC, + SmallVectorImpl &NewMIs) const +{ + assert(0 && "unimplemented\n"); +} + +void XCoreInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC) const +{ + BuildMI(MBB, I, get(XCore::LDWSP_lru6), DestReg).addFrameIndex(FrameIndex) + .addImm(0); +} + +void XCoreInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg, + SmallVectorImpl &Addr, + const TargetRegisterClass *RC, + SmallVectorImpl &NewMIs) const +{ + assert(0 && "unimplemented\n"); +} + +bool XCoreInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI) const +{ + if (CSI.empty()) { + return true; + } + MachineFunction *MF = MBB.getParent(); + const MachineFrameInfo *MFI = MF->getFrameInfo(); + MachineModuleInfo *MMI = MFI->getMachineModuleInfo(); + XCoreFunctionInfo *XFI = MF->getInfo(); + + bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(*MF); + + for (std::vector::const_iterator it = CSI.begin(); + it != CSI.end(); ++it) { + // Add the callee-saved register as live-in. It's killed at the spill. 
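+    // When frame moves are required, a DBG_LABEL is emitted after each spill
+    // and recorded in XCoreFunctionInfo so that debug information for the
+    // saved registers can be generated later.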
+ MBB.addLiveIn(it->getReg()); + + storeRegToStackSlot(MBB, MI, it->getReg(), true, + it->getFrameIdx(), it->getRegClass()); + if (emitFrameMoves) { + unsigned SaveLabelId = MMI->NextLabelID(); + BuildMI(MBB, MI, get(XCore::DBG_LABEL)).addImm(SaveLabelId); + XFI->getSpillLabels().push_back( + std::pair(SaveLabelId, *it)); + } + } + return true; +} + +bool XCoreInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI) const +{ + bool AtStart = MI == MBB.begin(); + MachineBasicBlock::iterator BeforeI = MI; + if (!AtStart) + --BeforeI; + for (std::vector::const_iterator it = CSI.begin(); + it != CSI.end(); ++it) { + + loadRegFromStackSlot(MBB, MI, it->getReg(), + it->getFrameIdx(), + it->getRegClass()); + assert(MI != MBB.begin() && + "loadRegFromStackSlot didn't insert any code!"); + // Insert in reverse order. loadRegFromStackSlot can insert multiple + // instructions. + if (AtStart) + MI = MBB.begin(); + else { + MI = BeforeI; + ++MI; + } + } + return true; +} + +/// BlockHasNoFallThrough - Analyse if MachineBasicBlock does not +/// fall-through into its successor block. +bool XCoreInstrInfo:: +BlockHasNoFallThrough(const MachineBasicBlock &MBB) const +{ + if (MBB.empty()) return false; + + switch (MBB.back().getOpcode()) { + case XCore::RETSP_u6: // Return. + case XCore::RETSP_lu6: + case XCore::BAU_1r: // Indirect branch. + case XCore::BRFU_u6: // Uncond branch. + case XCore::BRFU_lu6: + case XCore::BRBU_u6: + case XCore::BRBU_lu6: + return true; + default: return false; + } +} + +/// ReverseBranchCondition - Return the inverse opcode of the +/// specified Branch instruction. +bool XCoreInstrInfo:: +ReverseBranchCondition(SmallVectorImpl &Cond) const +{ + assert((Cond.size() == 2) && + "Invalid XCore branch condition!"); + Cond[0].setImm(GetOppositeBranchCondition((XCore::CondCode)Cond[0].getImm())); + return false; +} diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.h b/llvm/lib/Target/XCore/XCoreInstrInfo.h new file mode 100644 index 000000000000..6213f17238da --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreInstrInfo.h @@ -0,0 +1,107 @@ +//===- XCoreInstrInfo.h - XCore Instruction Information ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the XCore implementation of the TargetInstrInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef XCOREINSTRUCTIONINFO_H +#define XCOREINSTRUCTIONINFO_H + +#include "llvm/Target/TargetInstrInfo.h" +#include "XCoreRegisterInfo.h" + +namespace llvm { + +class XCoreInstrInfo : public TargetInstrInfoImpl { + const XCoreRegisterInfo RI; +public: + XCoreInstrInfo(void); + + /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As + /// such, whenever a client has an instance of instruction info, it should + /// always be able to get register info as well (through this method). + /// + virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; } + + /// Return true if the instruction is a register to register move and + /// leave the source and dest operands in the passed parameters. 
+ /// + virtual bool isMoveInstr(const MachineInstr &MI, + unsigned &SrcReg, unsigned &DstReg) const; + + /// isLoadFromStackSlot - If the specified machine instruction is a direct + /// load from a stack slot, return the virtual or physical register number of + /// the destination along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than loading from the stack slot. + virtual unsigned isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const; + + /// isStoreToStackSlot - If the specified machine instruction is a direct + /// store to a stack slot, return the virtual or physical register number of + /// the source reg along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than storing to the stack slot. + virtual unsigned isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const; + + virtual bool isInvariantLoad(MachineInstr *MI) const; + + virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond) const; + + virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, + MachineBasicBlock *FBB, + const SmallVectorImpl &Cond) const; + + virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const; + + virtual bool copyRegToReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DestReg, unsigned SrcReg, + const TargetRegisterClass *DestRC, + const TargetRegisterClass *SrcRC) const; + + virtual void storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned SrcReg, bool isKill, int FrameIndex, + const TargetRegisterClass *RC) const; + + virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill, + SmallVectorImpl &Addr, + const TargetRegisterClass *RC, + SmallVectorImpl &NewMIs) const; + + virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC) const; + + virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg, + SmallVectorImpl &Addr, + const TargetRegisterClass *RC, + SmallVectorImpl &NewMIs) const; + + virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI) const; + + virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI) const; + + virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const; + + virtual bool ReverseBranchCondition( + SmallVectorImpl &Cond) const; +}; + +} + +#endif diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.td b/llvm/lib/Target/XCore/XCoreInstrInfo.td new file mode 100644 index 000000000000..15e51f51b119 --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreInstrInfo.td @@ -0,0 +1,980 @@ +//===- XCoreInstrInfo.td - Target Description for XCore ----*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the XCore instructions in TableGen format. 
+// +//===----------------------------------------------------------------------===// + +// Uses of CP, DP are not currently reflected in the patterns, since +// having a physical register as an operand prevents loop hoisting and +// since the value of these registers never changes during the life of the +// function. + +//===----------------------------------------------------------------------===// +// Instruction format superclass. +//===----------------------------------------------------------------------===// + +include "XCoreInstrFormats.td" + +//===----------------------------------------------------------------------===// +// Feature predicates. +//===----------------------------------------------------------------------===// + +// HasXS1A - This predicate is true when the target processor supports XS1A +// instructions. +def HasXS1A : Predicate<"Subtarget.isXS1A()">; + +// HasXS1B - This predicate is true when the target processor supports XS1B +// instructions. +def HasXS1B : Predicate<"Subtarget.isXS1B()">; + +//===----------------------------------------------------------------------===// +// XCore specific DAG Nodes. +// + +// Call +def SDT_XCoreBranchLink : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>; +def XCoreBranchLink : SDNode<"XCoreISD::BL",SDT_XCoreBranchLink, + [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>; + +def XCoreRetsp : SDNode<"XCoreISD::RETSP", SDTNone, + [SDNPHasChain, SDNPOptInFlag]>; + +def SDT_XCoreAddress : SDTypeProfile<1, 1, + [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>; + +def pcrelwrapper : SDNode<"XCoreISD::PCRelativeWrapper", SDT_XCoreAddress, + []>; + +def dprelwrapper : SDNode<"XCoreISD::DPRelativeWrapper", SDT_XCoreAddress, + []>; + +def cprelwrapper : SDNode<"XCoreISD::CPRelativeWrapper", SDT_XCoreAddress, + []>; + +def SDT_XCoreStwsp : SDTypeProfile<0, 2, [SDTCisInt<1>]>; +def XCoreStwsp : SDNode<"XCoreISD::STWSP", SDT_XCoreStwsp, + [SDNPHasChain]>; + +// These are target-independent nodes, but have target-specific formats. 
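+// They are matched by the ADJCALLSTACKDOWN/ADJCALLSTACKUP pseudos below;
+// LowerCCCCallTo brackets every call with CALLSEQ_START/CALLSEQ_END nodes
+// carrying the number of bytes of outgoing argument stack.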
+def SDT_XCoreCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>; +def SDT_XCoreCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>, + SDTCisVT<1, i32> ]>; + +def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_XCoreCallSeqStart, + [SDNPHasChain, SDNPOutFlag]>; +def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_XCoreCallSeqEnd, + [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>; + +//===----------------------------------------------------------------------===// +// Instruction Pattern Stuff +//===----------------------------------------------------------------------===// + +def div4_xform : SDNodeXFormgetZExtValue() % 4 == 0); + return getI32Imm(N->getZExtValue()/4); +}]>; + +def msksize_xform : SDNodeXFormgetZExtValue())); + // look for the first non-zero bit + return getI32Imm(32 - CountLeadingZeros_32(N->getZExtValue())); +}]>; + +def neg_xform : SDNodeXFormgetZExtValue(); + return getI32Imm(-value); +}]>; + +def div4neg_xform : SDNodeXFormgetZExtValue(); + assert(-value % 4 == 0); + return getI32Imm(-value/4); +}]>; + +def immUs4Neg : PatLeaf<(imm), [{ + uint32_t value = (uint32_t)N->getZExtValue(); + return (-value)%4 == 0 && (-value)/4 <= 11; +}]>; + +def immUs4 : PatLeaf<(imm), [{ + uint32_t value = (uint32_t)N->getZExtValue(); + return value%4 == 0 && value/4 <= 11; +}]>; + +def immUsNeg : PatLeaf<(imm), [{ + return -((uint32_t)N->getZExtValue()) <= 11; +}]>; + +def immUs : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() <= 11; +}]>; + +def immU6 : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() < (1 << 6); +}]>; + +def immU10 : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() < (1 << 10); +}]>; + +def immU16 : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() < (1 << 16); +}]>; + +def immU20 : PatLeaf<(imm), [{ + return (uint32_t)N->getZExtValue() < (1 << 20); +}]>; + +// FIXME check subtarget. Currently we check if the immediate +// is in the common subset of legal immediate values for both +// XS1A and XS1B. +def immMskBitp : PatLeaf<(imm), [{ + uint32_t value = (uint32_t)N->getZExtValue(); + if (!isMask_32(value)) { + return false; + } + int msksize = 32 - CountLeadingZeros_32(value); + return (msksize >= 1 && msksize <= 8) + || msksize == 16 + || msksize == 24 + || msksize == 32; +}]>; + +// FIXME check subtarget. Currently we check if the immediate +// is in the common subset of legal immediate values for both +// XS1A and XS1B. 
+def immBitp : PatLeaf<(imm), [{ + uint32_t value = (uint32_t)N->getZExtValue(); + return (value >= 1 && value <= 8) + || value == 16 + || value == 24 + || value == 32; +}]>; + +def lda16f : PatFrag<(ops node:$addr, node:$offset), + (add node:$addr, (shl node:$offset, 1))>; +def lda16b : PatFrag<(ops node:$addr, node:$offset), + (sub node:$addr, (shl node:$offset, 1))>; +def ldawf : PatFrag<(ops node:$addr, node:$offset), + (add node:$addr, (shl node:$offset, 2))>; +def ldawb : PatFrag<(ops node:$addr, node:$offset), + (sub node:$addr, (shl node:$offset, 2))>; + +// Instruction operand types +def calltarget : Operand; +def brtarget : Operand; +def pclabel : Operand; + +// Addressing modes +def ADDRspii : ComplexPattern; +def ADDRdpii : ComplexPattern; +def ADDRcpii : ComplexPattern; + +// Address operands +def MEMii : Operand { + let PrintMethod = "printMemOperand"; + let MIOperandInfo = (ops i32imm, i32imm); +} + +//===----------------------------------------------------------------------===// +// Instruction Class Templates +//===----------------------------------------------------------------------===// + +// Three operand short + +multiclass F3R_2RUS { + def _3r: _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + def _2rus : _F2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, immUs:$c))]>; +} + +multiclass F3R_2RUS_np { + def _3r: _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + []>; + def _2rus : _F2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + []>; +} + +multiclass F3R_2RBITP { + def _3r: _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + def _2rus : _F2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, immBitp:$c))]>; +} + +class F3R : _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + +class F3R_np : _F3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + []>; +// Three operand long + +/// FL3R_L2RUS multiclass - Define a normal FL3R/FL2RUS pattern in one shot. +multiclass FL3R_L2RUS { + def _l3r: _FL3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + def _l2rus : _FL2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, immUs:$c))]>; +} + +/// FL3R_L2RUS multiclass - Define a normal FL3R/FL2RUS pattern in one shot. 
+multiclass FL3R_L2RBITP { + def _l3r: _FL3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + def _l2rus : _FL2RUS< + (outs GRRegs:$dst), (ins GRRegs:$b, i32imm:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, immBitp:$c))]>; +} + +class FL3R : _FL3R< + (outs GRRegs:$dst), (ins GRRegs:$b, GRRegs:$c), + !strconcat(OpcStr, " $dst, $b, $c"), + [(set GRRegs:$dst, (OpNode GRRegs:$b, GRRegs:$c))]>; + +// Register - U6 +// Operand register - U6 +multiclass FRU6_LRU6_branch { + def _ru6: _FRU6< + (outs), (ins GRRegs:$cond, brtarget:$dest), + !strconcat(OpcStr, " $cond, $dest"), + []>; + def _lru6: _FLRU6< + (outs), (ins GRRegs:$cond, brtarget:$dest), + !strconcat(OpcStr, " $cond, $dest"), + []>; +} + +multiclass FRU6_LRU6_cp { + def _ru6: _FRU6< + (outs GRRegs:$dst), (ins i32imm:$a), + !strconcat(OpcStr, " $dst, cp[$a]"), + []>; + def _lru6: _FLRU6< + (outs GRRegs:$dst), (ins i32imm:$a), + !strconcat(OpcStr, " $dst, cp[$a]"), + []>; +} + +// U6 +multiclass FU6_LU6 { + def _u6: _FU6< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + [(OpNode immU6:$b)]>; + def _lu6: _FLU6< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + [(OpNode immU16:$b)]>; +} + +multiclass FU6_LU6_np { + def _u6: _FU6< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + []>; + def _lu6: _FLU6< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + []>; +} + +// U10 +multiclass FU10_LU10_np { + def _u10: _FU10< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + []>; + def _lu10: _FLU10< + (outs), (ins i32imm:$b), + !strconcat(OpcStr, " $b"), + []>; +} + +// Two operand short + +class F2R_np : _F2R< + (outs GRRegs:$dst), (ins GRRegs:$b), + !strconcat(OpcStr, " $dst, $b"), + []>; + +// Two operand long + +//===----------------------------------------------------------------------===// +// Pseudo Instructions +//===----------------------------------------------------------------------===// + +let Defs = [SP], Uses = [SP] in { +def ADJCALLSTACKDOWN : PseudoInstXCore<(outs), (ins i32imm:$amt), + "${:comment} ADJCALLSTACKDOWN $amt", + [(callseq_start timm:$amt)]>; +def ADJCALLSTACKUP : PseudoInstXCore<(outs), (ins i32imm:$amt1, i32imm:$amt2), + "${:comment} ADJCALLSTACKUP $amt1", + [(callseq_end timm:$amt1, timm:$amt2)]>; +} + +// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded by the +// scheduler into a branch sequence. 
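+// The expansion is performed by EmitInstrWithCustomInserter in
+// XCoreISelLowering.cpp: it splits the block, branches over the false value
+// with a bt on the condition register, and merges the two values with a PHI
+// in the sink block.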
+let usesCustomDAGSchedInserter = 1 in { + def SELECT_CC : PseudoInstXCore<(outs GRRegs:$dst), + (ins GRRegs:$cond, GRRegs:$T, GRRegs:$F), + "${:comment} SELECT_CC PSEUDO!", + [(set GRRegs:$dst, + (select GRRegs:$cond, GRRegs:$T, GRRegs:$F))]>; +} + +//===----------------------------------------------------------------------===// +// Instructions +//===----------------------------------------------------------------------===// + +// Three operand short +defm ADD : F3R_2RUS<"add", add>; +defm SUB : F3R_2RUS<"sub", sub>; +let neverHasSideEffects = 1 in { +defm EQ : F3R_2RUS_np<"eq">; +def LSS_3r : F3R_np<"lss">; +def LSU_3r : F3R_np<"lsu">; +} +def AND_3r : F3R<"and", and>; +def OR_3r : F3R<"or", or>; + +let mayLoad=1 in { +def LDW_3r : _F3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ldw $dst, $addr[$offset]", + []>; + +def LDW_2rus : _F2RUS<(outs GRRegs:$dst), (ins GRRegs:$addr, i32imm:$offset), + "ldw $dst, $addr[$offset]", + []>; + +def LD16S_3r : _F3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ld16s $dst, $addr[$offset]", + []>; + +def LD8U_3r : _F3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ld8u $dst, $addr[$offset]", + []>; +} + +let mayStore=1 in { +def STW_3r : _F3R<(outs), (ins GRRegs:$val, GRRegs:$addr, GRRegs:$offset), + "stw $val, $addr[$offset]", + []>; + +def STW_2rus : _F2RUS<(outs), (ins GRRegs:$val, GRRegs:$addr, i32imm:$offset), + "stw $val, $addr[$offset]", + []>; +} + +defm SHL : F3R_2RBITP<"shl", shl>; +defm SHR : F3R_2RBITP<"shr", srl>; +// TODO tsetr + +// Three operand long +def LDAWF_l3r : _FL3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ldaw $dst, $addr[$offset]", + [(set GRRegs:$dst, (ldawf GRRegs:$addr, GRRegs:$offset))]>; + +let neverHasSideEffects = 1 in +def LDAWF_l2rus : _FL2RUS<(outs GRRegs:$dst), + (ins GRRegs:$addr, i32imm:$offset), + "ldaw $dst, $addr[$offset]", + []>; + +def LDAWB_l3r : _FL3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "ldaw $dst, $addr[-$offset]", + [(set GRRegs:$dst, (ldawb GRRegs:$addr, GRRegs:$offset))]>; + +let neverHasSideEffects = 1 in +def LDAWB_l2rus : _FL2RUS<(outs GRRegs:$dst), + (ins GRRegs:$addr, i32imm:$offset), + "ldaw $dst, $addr[-$offset]", + []>; + +def LDA16F_l3r : _FL3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "lda16 $dst, $addr[$offset]", + [(set GRRegs:$dst, (lda16f GRRegs:$addr, GRRegs:$offset))]>; + +def LDA16B_l3r : _FL3R<(outs GRRegs:$dst), (ins GRRegs:$addr, GRRegs:$offset), + "lda16 $dst, $addr[-$offset]", + [(set GRRegs:$dst, (lda16b GRRegs:$addr, GRRegs:$offset))]>; + +def MUL_l3r : FL3R<"mul", mul>; +// Instructions which may trap are marked as side effecting. 
+let hasSideEffects = 1 in { +def DIVS_l3r : FL3R<"divs", sdiv>; +def DIVU_l3r : FL3R<"divu", udiv>; +def REMS_l3r : FL3R<"rems", srem>; +def REMU_l3r : FL3R<"remu", urem>; +} +def XOR_l3r : FL3R<"xor", xor>; +defm ASHR : FL3R_L2RBITP<"ashr", sra>; +// TODO crc32, crc8, inpw, outpw +let mayStore=1 in { +def ST16_l3r : _FL3R<(outs), (ins GRRegs:$val, GRRegs:$addr, GRRegs:$offset), + "st16 $val, $addr[$offset]", + []>; + +def ST8_l3r : _FL3R<(outs), (ins GRRegs:$val, GRRegs:$addr, GRRegs:$offset), + "st8 $val, $addr[$offset]", + []>; +} + +// Four operand long +let Predicates = [HasXS1B], Constraints = "$src1 = $dst1,$src2 = $dst2" in { +def MACCU_l4r : _L4R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3, + GRRegs:$src4), + "maccu $dst1, $dst2, $src3, $src4", + []>; + +def MACCS_l4r : _L4R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3, + GRRegs:$src4), + "maccs $dst1, $dst2, $src3, $src4", + []>; +} + +// Five operand long + +let Predicates = [HasXS1B] in { +def LADD_l5r : _L5R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3), + "ladd $dst1, $dst2, $src1, $src2, $src3", + []>; + +def LSUB_l5r : _L5R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3), + "lsub $dst1, $dst2, $src1, $src2, $src3", + []>; + +def LDIV_l5r : _L5R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3), + "ldiv $dst1, $dst2, $src1, $src2, $src3", + []>; +} + +// Six operand long + +def LMUL_l6r : _L6R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3, + GRRegs:$src4), + "lmul $dst1, $dst2, $src1, $src2, $src3, $src4", + []>; + +let Predicates = [HasXS1A] in +def MACC_l6r : _L6R<(outs GRRegs:$dst1, GRRegs:$dst2), + (ins GRRegs:$src1, GRRegs:$src2, GRRegs:$src3, + GRRegs:$src4), + "macc $dst1, $dst2, $src1, $src2, $src3, $src4", + []>; + +// Register - U6 + +//let Uses = [DP] in ... +let neverHasSideEffects = 1, isReMaterializable = 1 in +def LDAWDP_ru6: _FRU6<(outs GRRegs:$dst), (ins MEMii:$a), + "ldaw $dst, dp[$a]", + []>; + +let isReMaterializable = 1 in +def LDAWDP_lru6: _FLRU6< + (outs GRRegs:$dst), (ins MEMii:$a), + "ldaw $dst, dp[$a]", + [(set GRRegs:$dst, ADDRdpii:$a)]>; + +let mayLoad=1 in +def LDWDP_ru6: _FRU6<(outs GRRegs:$dst), (ins MEMii:$a), + "ldw $dst, dp[$a]", + []>; + +def LDWDP_lru6: _FLRU6< + (outs GRRegs:$dst), (ins MEMii:$a), + "ldw $dst, dp[$a]", + [(set GRRegs:$dst, (load ADDRdpii:$a))]>; + +let mayStore=1 in +def STWDP_ru6 : _FRU6<(outs), (ins GRRegs:$val, MEMii:$addr), + "stw $val, dp[$addr]", + []>; + +def STWDP_lru6 : _FLRU6<(outs), (ins GRRegs:$val, MEMii:$addr), + "stw $val, dp[$addr]", + [(store GRRegs:$val, ADDRdpii:$addr)]>; + +//let Uses = [CP] in .. 
+let mayLoad = 1, isReMaterializable = 1 in +defm LDWCP : FRU6_LRU6_cp<"ldw">; + +let Uses = [SP] in { +let mayStore=1 in +def STWSP_ru6 : _FRU6< + (outs), (ins GRRegs:$dst, MEMii:$b), + "stw $dst, sp[$b]", + []>; + +def STWSP_lru6 : _FLRU6< + (outs), (ins GRRegs:$dst, MEMii:$b), + "stw $dst, sp[$b]", + [(store GRRegs:$dst, ADDRspii:$b)]>; + +let mayStore=1 in +def STWSP_ru6_2 : _FRU6< + (outs), (ins GRRegs:$dst, i32imm:$b), + "stw $dst, sp[$b]", + []>; + +def STWSP_lru6_2 : _FLRU6< + (outs), (ins GRRegs:$dst, i32imm:$b), + "stw $dst, sp[$b]", + [(store GRRegs:$dst, ADDRspii:$b)]>; + +let mayLoad=1 in +def LDWSP_ru6 : _FRU6< + (outs GRRegs:$dst), (ins MEMii:$b), + "ldw $dst, sp[$b]", + []>; + +def LDWSP_lru6 : _FLRU6< + (outs GRRegs:$dst), (ins MEMii:$b), + "ldw $dst, sp[$b]", + [(set GRRegs:$dst, (load ADDRspii:$b))]>; + +let neverHasSideEffects = 1 in +def LDAWSP_ru6 : _FRU6< + (outs GRRegs:$dst), (ins MEMii:$b), + "ldaw $dst, sp[$b]", + []>; + +def LDAWSP_lru6 : _FLRU6< + (outs GRRegs:$dst), (ins MEMii:$b), + "ldaw $dst, sp[$b]", + [(set GRRegs: $dst, ADDRspii:$b)]>; + +let neverHasSideEffects = 1 in { +def LDAWSP_ru6_RRegs : _FRU6< + (outs RRegs:$dst), (ins i32imm:$b), + "ldaw $dst, sp[$b]", + []>; + +def LDAWSP_lru6_RRegs : _FLRU6< + (outs RRegs:$dst), (ins i32imm:$b), + "ldaw $dst, sp[$b]", + []>; +} +} + +let isReMaterializable = 1 in { +def LDC_ru6 : _FRU6< + (outs GRRegs:$dst), (ins i32imm:$b), + "ldc $dst, $b", + [(set GRRegs:$dst, immU6:$b)]>; + +def LDC_lru6 : _FLRU6< + (outs GRRegs:$dst), (ins i32imm:$b), + "ldc $dst, $b", + [(set GRRegs:$dst, immU16:$b)]>; +} + +// Operand register - U6 +// TODO setc +let isBranch = 1, isTerminator = 1 in { +defm BRFT: FRU6_LRU6_branch<"bt">; +defm BRBT: FRU6_LRU6_branch<"bt">; +defm BRFF: FRU6_LRU6_branch<"bf">; +defm BRBF: FRU6_LRU6_branch<"bf">; +} + +// U6 +let Defs = [SP], Uses = [SP] in { +let neverHasSideEffects = 1 in +defm EXTSP : FU6_LU6_np<"extsp">; +let mayStore = 1 in +defm ENTSP : FU6_LU6_np<"entsp">; + +let isReturn = 1, isTerminator = 1, mayLoad = 1 in { +defm RETSP : FU6_LU6<"retsp", XCoreRetsp>; +} +} + +// TODO extdp, kentsp, krestsp, blat, setsr +// clrsr, getsr, kalli +let isBranch = 1, isTerminator = 1 in { +def BRBU_u6 : _FU6< + (outs), + (ins brtarget:$target), + "bu $target", + []>; + +def BRBU_lu6 : _FLU6< + (outs), + (ins brtarget:$target), + "bu $target", + []>; + +def BRFU_u6 : _FU6< + (outs), + (ins brtarget:$target), + "bu $target", + []>; + +def BRFU_lu6 : _FLU6< + (outs), + (ins brtarget:$target), + "bu $target", + []>; +} + +//let Uses = [CP] in ... 
+let Predicates = [HasXS1B], Defs = [R11], neverHasSideEffects = 1, + isReMaterializable = 1 in +def LDAWCP_u6: _FRU6<(outs), (ins MEMii:$a), + "ldaw r11, cp[$a]", + []>; + +let Predicates = [HasXS1B], Defs = [R11], isReMaterializable = 1 in +def LDAWCP_lu6: _FLRU6< + (outs), (ins MEMii:$a), + "ldaw r11, cp[$a]", + [(set R11, ADDRcpii:$a)]>; + +// U10 +// TODO ldwcpl, blacp + +let Defs = [R11], isReMaterializable = 1, neverHasSideEffects = 1 in +def LDAP_u10 : _FU10< + (outs), + (ins i32imm:$addr), + "ldap r11, $addr", + []>; + +let Defs = [R11], isReMaterializable = 1 in +def LDAP_lu10 : _FLU10< + (outs), + (ins i32imm:$addr), + "ldap r11, $addr", + [(set R11, (pcrelwrapper tglobaladdr:$addr))]>; + +let isCall=1, +// All calls clobber the the link register and the non-callee-saved registers: +Defs = [R0, R1, R2, R3, R11, LR] in { +def BL_u10 : _FU10< + (outs), + (ins calltarget:$target, variable_ops), + "bl $target", + [(XCoreBranchLink immU10:$target)]>; + +def BL_lu10 : _FLU10< + (outs), + (ins calltarget:$target, variable_ops), + "bl $target", + [(XCoreBranchLink immU20:$target)]>; +} + +// Two operand short +// TODO getr, getst +def NOT : _F2R<(outs GRRegs:$dst), (ins GRRegs:$b), + "not $dst, $b", + [(set GRRegs:$dst, (not GRRegs:$b))]>; + +def NEG : _F2R<(outs GRRegs:$dst), (ins GRRegs:$b), + "neg $dst, $b", + [(set GRRegs:$dst, (ineg GRRegs:$b))]>; + +// TODO setd, eet, eef, getts, setpt, outct, inct, chkct, outt, intt, out, +// in, outshr, inshr, testct, testwct, tinitpc, tinitdp, tinitsp, tinitcp, +// tsetmr, sext (reg), zext (reg) +let isTwoAddress = 1 in { +let neverHasSideEffects = 1 in +def SEXT_rus : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$src1, i32imm:$src2), + "sext $dst, $src2", + []>; + +let neverHasSideEffects = 1 in +def ZEXT_rus : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$src1, i32imm:$src2), + "zext $dst, $src2", + []>; + +def ANDNOT_2r : _F2R<(outs GRRegs:$dst), (ins GRRegs:$src1, GRRegs:$src2), + "andnot $dst, $src2", + [(set GRRegs:$dst, (and GRRegs:$src1, (not GRRegs:$src2)))]>; +} + +let isReMaterializable = 1, neverHasSideEffects = 1 in +def MKMSK_rus : _FRUS<(outs GRRegs:$dst), (ins i32imm:$size), + "mkmsk $dst, $size", + []>; + +def MKMSK_2r : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$size), + "mkmsk $dst, $size", + [(set GRRegs:$dst, (add (shl 1, GRRegs:$size), 0xffffffff))]>; + +// Two operand long +// TODO settw, setclk, setrdy, setpsc, endin, peek, +// getd, testlcl, tinitlr, getps, setps +def BITREV_l2r : _FL2R<(outs GRRegs:$dst), (ins GRRegs:$src), + "bitrev $dst, $src", + []>; + +def BYTEREV_l2r : _FL2R<(outs GRRegs:$dst), (ins GRRegs:$src), + "byterev $dst, $src", + [(set GRRegs:$dst, (bswap GRRegs:$src))]>; + +def CLZ_l2r : _FL2R<(outs GRRegs:$dst), (ins GRRegs:$src), + "clz $dst, $src", + [(set GRRegs:$dst, (ctlz GRRegs:$src))]>; + +// One operand short +// TODO edu, eeu, waitet, waitef, freer, tstart, msync, mjoin, syncr, clrtp +// bru, setdp, setcp, setv, setev, kcall, ecallt, ecallf +// dgetreg +let isBranch=1, isIndirectBranch=1, isTerminator=1 in +def BAU_1r : _F1R<(outs), (ins GRRegs:$addr), + "bau $addr", + [(brind GRRegs:$addr)]>; + +let Defs=[SP], neverHasSideEffects=1 in +def SETSP_1r : _F1R<(outs), (ins GRRegs:$src), + "set sp, $src", + []>; + +let isCall=1, +// All calls clobber the the link register and the non-callee-saved registers: +Defs = [R0, R1, R2, R3, R11, LR] in { +def BLA_1r : _F1R<(outs), (ins GRRegs:$addr, variable_ops), + "bla $addr", + [(XCoreBranchLink GRRegs:$addr)]>; +} + +// Zero operand short +// TODO waiteu, clre, ssync, 
freet, ldspc, stspc, ldssr, stssr, ldsed, stsed, +// stet, geted, getet, getkep, getksp, setkep, getid, kret, dcall, dret, +// dentsp, drestsp + +let Defs = [R11] in +def GETID_0R : _F0R<(outs), (ins), + "get r11, id", + []>; + +//===----------------------------------------------------------------------===// +// Non-Instruction Patterns +//===----------------------------------------------------------------------===// + +def : Pat<(XCoreBranchLink tglobaladdr:$addr), (BL_lu10 tglobaladdr:$addr)>; +def : Pat<(XCoreBranchLink texternalsym:$addr), (BL_lu10 texternalsym:$addr)>; +def : Pat<(XCoreStwsp GRRegs:$val, immU6:$index), + (STWSP_ru6_2 GRRegs:$val, immU6:$index)>; +def : Pat<(XCoreStwsp GRRegs:$val, immU16:$index), + (STWSP_lru6_2 GRRegs:$val, immU16:$index)>; + +/// sext_inreg +def : Pat<(sext_inreg GRRegs:$b, i1), (SEXT_rus GRRegs:$b, 1)>; +def : Pat<(sext_inreg GRRegs:$b, i8), (SEXT_rus GRRegs:$b, 8)>; +def : Pat<(sext_inreg GRRegs:$b, i16), (SEXT_rus GRRegs:$b, 16)>; + +/// loads +def : Pat<(zextloadi8 (add GRRegs:$addr, GRRegs:$offset)), + (LD8U_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(zextloadi8 GRRegs:$addr), (LD8U_3r GRRegs:$addr, (LDC_ru6 0))>; + +def : Pat<(zextloadi16 (lda16f GRRegs:$addr, GRRegs:$offset)), + (LD16S_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(sextloadi16 GRRegs:$addr), (LD16S_3r GRRegs:$addr, (LDC_ru6 0))>; + +def : Pat<(load (ldawf GRRegs:$addr, GRRegs:$offset)), + (LDW_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(load (add GRRegs:$addr, immUs4:$offset)), + (LDW_2rus GRRegs:$addr, (div4_xform immUs4:$offset))>; +def : Pat<(load GRRegs:$addr), (LDW_2rus GRRegs:$addr, 0)>; + +/// anyext +def : Pat<(extloadi8 (add GRRegs:$addr, GRRegs:$offset)), + (LD8U_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(extloadi8 GRRegs:$addr), (LD8U_3r GRRegs:$addr, (LDC_ru6 0))>; +def : Pat<(extloadi16 (lda16f GRRegs:$addr, GRRegs:$offset)), + (LD16S_3r GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(extloadi16 GRRegs:$addr), (LD16S_3r GRRegs:$addr, (LDC_ru6 0))>; + +/// stores +def : Pat<(truncstorei8 GRRegs:$val, (add GRRegs:$addr, GRRegs:$offset)), + (ST8_l3r GRRegs:$val, GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(truncstorei8 GRRegs:$val, GRRegs:$addr), + (ST8_l3r GRRegs:$val, GRRegs:$addr, (LDC_ru6 0))>; + +def : Pat<(truncstorei16 GRRegs:$val, (lda16f GRRegs:$addr, GRRegs:$offset)), + (ST16_l3r GRRegs:$val, GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(truncstorei16 GRRegs:$val, GRRegs:$addr), + (ST16_l3r GRRegs:$val, GRRegs:$addr, (LDC_ru6 0))>; + +def : Pat<(store GRRegs:$val, (ldawf GRRegs:$addr, GRRegs:$offset)), + (STW_3r GRRegs:$val, GRRegs:$addr, GRRegs:$offset)>; +def : Pat<(store GRRegs:$val, (add GRRegs:$addr, immUs4:$offset)), + (STW_2rus GRRegs:$val, GRRegs:$addr, (div4_xform immUs4:$offset))>; +def : Pat<(store GRRegs:$val, GRRegs:$addr), + (STW_2rus GRRegs:$val, GRRegs:$addr, 0)>; + +/// cttz +def : Pat<(cttz GRRegs:$src), (CLZ_l2r (BITREV_l2r GRRegs:$src))>; + +/// +/// branch patterns +/// + +// unconditional branch +def : Pat<(br bb:$addr), (BRFU_lu6 bb:$addr)>; + +// direct match equal/notequal zero brcond +def : Pat<(brcond (setne GRRegs:$lhs, 0), bb:$dst), + (BRFT_lru6 GRRegs:$lhs, bb:$dst)>; +def : Pat<(brcond (seteq GRRegs:$lhs, 0), bb:$dst), + (BRFF_lru6 GRRegs:$lhs, bb:$dst)>; + +def : Pat<(brcond (setle GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (LSS_3r GRRegs:$rhs, GRRegs:$lhs), bb:$dst)>; +def : Pat<(brcond (setule GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (LSU_3r GRRegs:$rhs, GRRegs:$lhs), bb:$dst)>; +def : Pat<(brcond (setge 
GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (LSS_3r GRRegs:$lhs, GRRegs:$rhs), bb:$dst)>; +def : Pat<(brcond (setuge GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (LSU_3r GRRegs:$lhs, GRRegs:$rhs), bb:$dst)>; +def : Pat<(brcond (setne GRRegs:$lhs, GRRegs:$rhs), bb:$dst), + (BRFF_lru6 (EQ_3r GRRegs:$lhs, GRRegs:$rhs), bb:$dst)>; +def : Pat<(brcond (setne GRRegs:$lhs, immUs:$rhs), bb:$dst), + (BRFF_lru6 (EQ_2rus GRRegs:$lhs, immUs:$rhs), bb:$dst)>; + +// generic brcond pattern +def : Pat<(brcond GRRegs:$cond, bb:$addr), (BRFT_lru6 GRRegs:$cond, bb:$addr)>; + + +/// +/// Select patterns +/// + +// direct match equal/notequal zero select +def : Pat<(select (setne GRRegs:$lhs, 0), GRRegs:$T, GRRegs:$F), + (SELECT_CC GRRegs:$lhs, GRRegs:$T, GRRegs:$F)>; + +def : Pat<(select (seteq GRRegs:$lhs, 0), GRRegs:$T, GRRegs:$F), + (SELECT_CC GRRegs:$lhs, GRRegs:$F, GRRegs:$T)>; + +def : Pat<(select (setle GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (LSS_3r GRRegs:$rhs, GRRegs:$lhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setule GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (LSU_3r GRRegs:$rhs, GRRegs:$lhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setge GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (LSS_3r GRRegs:$lhs, GRRegs:$rhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setuge GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (LSU_3r GRRegs:$lhs, GRRegs:$rhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setne GRRegs:$lhs, GRRegs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (EQ_3r GRRegs:$lhs, GRRegs:$rhs), GRRegs:$F, GRRegs:$T)>; +def : Pat<(select (setne GRRegs:$lhs, immUs:$rhs), GRRegs:$T, GRRegs:$F), + (SELECT_CC (EQ_2rus GRRegs:$lhs, immUs:$rhs), GRRegs:$F, GRRegs:$T)>; + +/// +/// setcc patterns, only matched when none of the above brcond +/// patterns match +/// + +// setcc 2 register operands +def : Pat<(setle GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (LSS_3r GRRegs:$rhs, GRRegs:$lhs), 0)>; +def : Pat<(setule GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (LSU_3r GRRegs:$rhs, GRRegs:$lhs), 0)>; + +def : Pat<(setgt GRRegs:$lhs, GRRegs:$rhs), + (LSS_3r GRRegs:$rhs, GRRegs:$lhs)>; +def : Pat<(setugt GRRegs:$lhs, GRRegs:$rhs), + (LSU_3r GRRegs:$rhs, GRRegs:$lhs)>; + +def : Pat<(setge GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (LSS_3r GRRegs:$lhs, GRRegs:$rhs), 0)>; +def : Pat<(setuge GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (LSU_3r GRRegs:$lhs, GRRegs:$rhs), 0)>; + +def : Pat<(setlt GRRegs:$lhs, GRRegs:$rhs), + (LSS_3r GRRegs:$lhs, GRRegs:$rhs)>; +def : Pat<(setult GRRegs:$lhs, GRRegs:$rhs), + (LSU_3r GRRegs:$lhs, GRRegs:$rhs)>; + +def : Pat<(setne GRRegs:$lhs, GRRegs:$rhs), + (EQ_2rus (EQ_3r GRRegs:$lhs, GRRegs:$rhs), 0)>; + +def : Pat<(seteq GRRegs:$lhs, GRRegs:$rhs), + (EQ_3r GRRegs:$lhs, GRRegs:$rhs)>; + +// setcc reg/imm operands +def : Pat<(seteq GRRegs:$lhs, immUs:$rhs), + (EQ_2rus GRRegs:$lhs, immUs:$rhs)>; +def : Pat<(setne GRRegs:$lhs, immUs:$rhs), + (EQ_2rus (EQ_2rus GRRegs:$lhs, immUs:$rhs), 0)>; + +// misc +def : Pat<(add GRRegs:$addr, immUs4:$offset), + (LDAWF_l2rus GRRegs:$addr, (div4_xform immUs4:$offset))>; + +def : Pat<(sub GRRegs:$addr, immUs4:$offset), + (LDAWB_l2rus GRRegs:$addr, (div4_xform immUs4:$offset))>; + +def : Pat<(and GRRegs:$val, immMskBitp:$mask), + (ZEXT_rus GRRegs:$val, (msksize_xform immMskBitp:$mask))>; + +// (sub X, imm) gets canonicalized to (add X, -imm). Match this form. 
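// For example, IR "sub %x, 12" reaches instruction selection as
// (add %x, -12); immUsNeg accepts the negative immediate, and neg_xform is
// assumed here to flip its sign back so the short SUB_2rus form below can
// still be used.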
+def : Pat<(add GRRegs:$src1, immUsNeg:$src2), + (SUB_2rus GRRegs:$src1, (neg_xform immUsNeg:$src2))>; + +def : Pat<(add GRRegs:$src1, immUs4Neg:$src2), + (LDAWB_l2rus GRRegs:$src1, (div4neg_xform immUs4Neg:$src2))>; + +/// +/// Some peepholes +/// + +def : Pat<(mul GRRegs:$src, 3), + (LDA16F_l3r GRRegs:$src, GRRegs:$src)>; + +def : Pat<(mul GRRegs:$src, 5), + (LDAWF_l3r GRRegs:$src, GRRegs:$src)>; + +def : Pat<(mul GRRegs:$src, -3), + (LDAWB_l3r GRRegs:$src, GRRegs:$src)>; + +// ashr X, 32 is equivalent to ashr X, 31 on the XCore. +def : Pat<(sra GRRegs:$src, 31), + (ASHR_l2rus GRRegs:$src, 32)>; + diff --git a/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h b/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h new file mode 100644 index 000000000000..43adb0f917c9 --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h @@ -0,0 +1,69 @@ +//====- XCoreMachineFuctionInfo.h - XCore machine function info -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares XCore-specific per-machine-function information. +// +//===----------------------------------------------------------------------===// + +#ifndef XCOREMACHINEFUNCTIONINFO_H +#define XCOREMACHINEFUNCTIONINFO_H + +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include + +namespace llvm { + +// Forward declarations +class Function; + +/// XCoreFunctionInfo - This class is derived from MachineFunction private +/// XCore target-specific information for each MachineFunction. +class XCoreFunctionInfo : public MachineFunctionInfo { +private: + bool UsesLR; + int LRSpillSlot; + int FPSpillSlot; + int VarArgsFrameIndex; + std::vector > SpillLabels; + +public: + XCoreFunctionInfo() : + UsesLR(false), + LRSpillSlot(0), + FPSpillSlot(0), + VarArgsFrameIndex(0) {} + + XCoreFunctionInfo(MachineFunction &MF) : + UsesLR(false), + LRSpillSlot(0), + FPSpillSlot(0), + VarArgsFrameIndex(0) {} + + ~XCoreFunctionInfo() {} + + void setVarArgsFrameIndex(int off) { VarArgsFrameIndex = off; } + int getVarArgsFrameIndex() const { return VarArgsFrameIndex; } + + void setUsesLR(bool val) { UsesLR = val; } + bool getUsesLR() const { return UsesLR; } + + void setLRSpillSlot(int off) { LRSpillSlot = off; } + int getLRSpillSlot() const { return LRSpillSlot; } + + void setFPSpillSlot(int off) { FPSpillSlot = off; } + int getFPSpillSlot() const { return FPSpillSlot; } + + std::vector >&getSpillLabels() { + return SpillLabels; + } +}; +} // End llvm namespace + +#endif // XCOREMACHINEFUNCTIONINFO_H diff --git a/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp new file mode 100644 index 000000000000..5293c82f1341 --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp @@ -0,0 +1,596 @@ +//===- XCoreRegisterInfo.cpp - XCore Register Information -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the XCore implementation of the MRegisterInfo class. 
+// +//===----------------------------------------------------------------------===// + +#include "XCoreRegisterInfo.h" +#include "XCoreMachineFunctionInfo.h" +#include "XCore.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineLocation.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/Target/TargetFrameInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Type.h" +#include "llvm/Function.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Support/Debug.h" + +using namespace llvm; + +XCoreRegisterInfo::XCoreRegisterInfo(const TargetInstrInfo &tii) + : XCoreGenRegisterInfo(XCore::ADJCALLSTACKDOWN, XCore::ADJCALLSTACKUP), + TII(tii) { +} + +// helper functions +static inline bool isImmUs(unsigned val) { + return val <= 11; +} + +static inline bool isImmU6(unsigned val) { + return val < (1 << 6); +} + +static inline bool isImmU16(unsigned val) { + return val < (1 << 16); +} + +static const unsigned XCore_ArgRegs[] = { + XCore::R0, XCore::R1, XCore::R2, XCore::R3 +}; + +const unsigned * XCoreRegisterInfo::getArgRegs(const MachineFunction *MF) +{ + return XCore_ArgRegs; +} + +unsigned XCoreRegisterInfo::getNumArgRegs(const MachineFunction *MF) +{ + return array_lengthof(XCore_ArgRegs); +} + +bool XCoreRegisterInfo::needsFrameMoves(const MachineFunction &MF) +{ + const MachineFrameInfo *MFI = MF.getFrameInfo(); + MachineModuleInfo *MMI = MFI->getMachineModuleInfo(); + return (MMI && MMI->hasDebugInfo()) || + !MF.getFunction()->doesNotThrow() || + UnwindTablesMandatory; +} + +const unsigned* XCoreRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) + const { + static const unsigned CalleeSavedRegs[] = { + XCore::R4, XCore::R5, XCore::R6, XCore::R7, + XCore::R8, XCore::R9, XCore::R10, XCore::LR, + 0 + }; + return CalleeSavedRegs; +} + +const TargetRegisterClass* const* +XCoreRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const { + static const TargetRegisterClass * const CalleeSavedRegClasses[] = { + XCore::GRRegsRegisterClass, XCore::GRRegsRegisterClass, + XCore::GRRegsRegisterClass, XCore::GRRegsRegisterClass, + XCore::GRRegsRegisterClass, XCore::GRRegsRegisterClass, + XCore::GRRegsRegisterClass, XCore::RRegsRegisterClass, + 0 + }; + return CalleeSavedRegClasses; +} + +BitVector XCoreRegisterInfo::getReservedRegs(const MachineFunction &MF) const { + BitVector Reserved(getNumRegs()); + Reserved.set(XCore::CP); + Reserved.set(XCore::DP); + Reserved.set(XCore::SP); + Reserved.set(XCore::LR); + if (hasFP(MF)) { + Reserved.set(XCore::R10); + } + return Reserved; +} + +bool +XCoreRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const { + // TODO can we estimate stack size? 
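// Note: scavenging is tied to hasFP because the r10-relative ldw/stw/ldaw
// forms only accept the small scaled immediate checked by isImmUs; larger
// offsets are rebuilt through a scavenged scratch register in
// eliminateFrameIndex below.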
+ return hasFP(MF); +} + +bool XCoreRegisterInfo::hasFP(const MachineFunction &MF) const { + return NoFramePointerElim || MF.getFrameInfo()->hasVarSizedObjects(); +} + +// This function eliminates ADJCALLSTACKDOWN, +// ADJCALLSTACKUP pseudo instructions +void XCoreRegisterInfo:: +eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const { + if (!hasReservedCallFrame(MF)) { + // Turn the adjcallstackdown instruction into 'extsp ' and the + // adjcallstackup instruction into 'ldaw sp, sp[]' + MachineInstr *Old = I; + uint64_t Amount = Old->getOperand(0).getImm(); + if (Amount != 0) { + // We need to keep the stack aligned properly. To do this, we round the + // amount of space needed for the outgoing arguments up to the next + // alignment boundary. + unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment(); + Amount = (Amount+Align-1)/Align*Align; + + assert(Amount%4 == 0); + Amount /= 4; + + bool isU6 = isImmU6(Amount); + + if (!isU6 && !isImmU16(Amount)) { + // FIX could emit multiple instructions in this case. + cerr << "eliminateCallFramePseudoInstr size too big: " + << Amount << "\n"; + abort(); + } + + MachineInstr *New; + if (Old->getOpcode() == XCore::ADJCALLSTACKDOWN) { + int Opcode = isU6 ? XCore::EXTSP_u6 : XCore::EXTSP_lu6; + New=BuildMI(MF, TII.get(Opcode)) + .addImm(Amount); + } else { + assert(Old->getOpcode() == XCore::ADJCALLSTACKUP); + int Opcode = isU6 ? XCore::LDAWSP_ru6_RRegs : XCore::LDAWSP_lru6_RRegs; + New=BuildMI(MF, TII.get(Opcode), XCore::SP) + .addImm(Amount); + } + + // Replace the pseudo instruction with a new instruction... + MBB.insert(I, New); + } + } + + MBB.erase(I); +} + +void XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, RegScavenger *RS) const { + assert(SPAdj == 0 && "Unexpected"); + MachineInstr &MI = *II; + unsigned i = 0; + + while (!MI.getOperand(i).isFI()) { + ++i; + assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!"); + } + + MachineOperand &FrameOp = MI.getOperand(i); + int FrameIndex = FrameOp.getIndex(); + + MachineFunction &MF = *MI.getParent()->getParent(); + int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex); + int StackSize = MF.getFrameInfo()->getStackSize(); + + #ifndef NDEBUG + DOUT << "\nFunction : " << MF.getFunction()->getName() << "\n"; + DOUT << "<--------->\n"; + MI.print(DOUT); + DOUT << "FrameIndex : " << FrameIndex << "\n"; + DOUT << "FrameOffset : " << Offset << "\n"; + DOUT << "StackSize : " << StackSize << "\n"; + #endif + + Offset += StackSize; + + // fold constant into offset. + Offset += MI.getOperand(i + 1).getImm(); + MI.getOperand(i + 1).ChangeToImmediate(0); + + assert(Offset%4 == 0 && "Misaligned stack offset"); + + #ifndef NDEBUG + DOUT << "Offset : " << Offset << "\n"; + DOUT << "<--------->\n"; + #endif + + Offset/=4; + + bool FP = hasFP(MF); + + if (FP) { + bool isUs = isImmUs(Offset); + MachineBasicBlock &MBB = *MI.getParent(); + unsigned FramePtr = XCore::R10; + unsigned Reg = MI.getOperand(0).getReg(); + bool isKill = MI.getOperand(0).isKill(); + + if (Reg == XCore::LR) { + // The LR should have been save in the prologue. 
+ cerr << "saving LR to FP unimplemented\n"; + abort(); + } + + MachineInstr *New = 0; + if (!isUs) { + if (!RS) { + cerr << "eliminateFrameIndex Frame size too big: " << Offset << "\n"; + abort(); + } + unsigned ScratchReg = RS->scavengeRegister(XCore::GRRegsRegisterClass, II, + SPAdj); + loadConstant(MBB, II, ScratchReg, Offset); + switch (MI.getOpcode()) { + case XCore::LDWSP_lru6: + New = BuildMI(MBB, II, TII.get(XCore::LDW_3r), Reg) + .addReg(FramePtr) + .addReg(ScratchReg, false, false, true); + break; + case XCore::STWSP_lru6: + New = BuildMI(MBB, II, TII.get(XCore::STW_3r)) + .addReg(Reg, false, false, isKill) + .addReg(FramePtr) + .addReg(ScratchReg, false, false, true); + break; + case XCore::LDAWSP_lru6: + New = BuildMI(MBB, II, TII.get(XCore::LDAWF_l3r), Reg) + .addReg(FramePtr) + .addReg(ScratchReg, false, false, true); + break; + default: + assert(0 && "Unexpected Opcode\n"); + } + } else { + switch (MI.getOpcode()) { + case XCore::LDWSP_lru6: + New = BuildMI(MBB, II, TII.get(XCore::LDW_2rus), Reg) + .addReg(FramePtr) + .addImm(Offset); + break; + case XCore::STWSP_lru6: + New = BuildMI(MBB, II, TII.get(XCore::STW_2rus)) + .addReg(Reg, false, false, isKill) + .addReg(FramePtr) + .addImm(Offset); + break; + case XCore::LDAWSP_lru6: + New = BuildMI(MBB, II, TII.get(XCore::LDAWF_l2rus), Reg) + .addReg(FramePtr) + .addImm(Offset); + break; + default: + assert(0 && "Unexpected Opcode\n"); + } + } + + // Erase old instruction. + MBB.erase(II); + } else { + bool isU6 = isImmU6(Offset); + if (!isU6 && !isImmU16(Offset)) { + // FIXME could make this work for LDWSP, LDAWSP. + cerr << "eliminateFrameIndex Frame size too big: " << Offset << "\n"; + abort(); + } + + int NewOpcode = MI.getOpcode(); + + switch (NewOpcode) { + case XCore::LDWSP_lru6: + NewOpcode = (isU6) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6; + break; + case XCore::STWSP_lru6: + NewOpcode = (isU6) ? XCore::STWSP_ru6 : XCore::STWSP_lru6; + break; + case XCore::LDAWSP_lru6: + NewOpcode = (isU6) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6; + break; + default: + assert(0 && "Unexpected Opcode\n"); + } + + MI.setDesc(TII.get(NewOpcode)); + FrameOp.ChangeToImmediate(Offset); + } +} + +void +XCoreRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, + RegScavenger *RS) const { + MachineFrameInfo *MFI = MF.getFrameInfo(); + bool LRUsed = MF.getRegInfo().isPhysRegUsed(XCore::LR); + const TargetRegisterClass *RC = XCore::GRRegsRegisterClass; + XCoreFunctionInfo *XFI = MF.getInfo(); + if (LRUsed) { + MF.getRegInfo().setPhysRegUnused(XCore::LR); + + bool isVarArg = MF.getFunction()->isVarArg(); + int FrameIdx; + if (! isVarArg) { + // A fixed offset of 0 allows us to save / restore LR using entsp / retsp. + FrameIdx = MFI->CreateFixedObject(RC->getSize(), 0); + } else { + FrameIdx = MFI->CreateStackObject(RC->getSize(), RC->getAlignment()); + } + XFI->setUsesLR(FrameIdx); + XFI->setLRSpillSlot(FrameIdx); + } + if (requiresRegisterScavenging(MF)) { + // Reserve a slot close to SP or frame pointer. + RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), + RC->getAlignment())); + } + if (hasFP(MF)) { + // A callee save register is used to hold the FP. + // This needs saving / restoring in the epilogue / prologue. 
+ XFI->setFPSpillSlot(MFI->CreateStackObject(RC->getSize(), + RC->getAlignment())); + } +} + +void XCoreRegisterInfo:: +processFunctionBeforeFrameFinalized(MachineFunction &MF) const { + +} + +void XCoreRegisterInfo:: +loadConstant(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned DstReg, int64_t Value) const { + // TODO use mkmsk if possible. + if (!isImmU16(Value)) { + // TODO use constant pool. + cerr << "loadConstant value too big " << Value << "\n"; + abort(); + } + int Opcode = isImmU6(Value) ? XCore::LDC_ru6 : XCore::LDC_lru6; + BuildMI(MBB, I, TII.get(Opcode), DstReg).addImm(Value); +} + +void XCoreRegisterInfo:: +storeToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned SrcReg, int Offset) const { + assert(Offset%4 == 0 && "Misaligned stack offset"); + Offset/=4; + bool isU6 = isImmU6(Offset); + if (!isU6 && !isImmU16(Offset)) { + cerr << "storeToStack offset too big " << Offset << "\n"; + abort(); + } + int Opcode = isU6 ? XCore::STWSP_ru6 : XCore::STWSP_lru6; + BuildMI(MBB, I, TII.get(Opcode)) + .addReg(SrcReg) + .addImm(Offset) + .addImm(0); +} + +void XCoreRegisterInfo:: +loadFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + unsigned DstReg, int Offset) const { + assert(Offset%4 == 0 && "Misaligned stack offset"); + Offset/=4; + bool isU6 = isImmU6(Offset); + if (!isU6 && !isImmU16(Offset)) { + cerr << "storeToStack offset too big " << Offset << "\n"; + abort(); + } + int Opcode = isU6 ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6; + BuildMI(MBB, I, TII.get(Opcode), DstReg) + .addImm(Offset) + .addImm(0); +} + +void XCoreRegisterInfo::emitPrologue(MachineFunction &MF) const { + MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB + MachineBasicBlock::iterator MBBI = MBB.begin(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + MachineModuleInfo *MMI = MFI->getMachineModuleInfo(); + XCoreFunctionInfo *XFI = MF.getInfo(); + + bool FP = hasFP(MF); + + // Work out frame sizes. + int FrameSize = MFI->getStackSize(); + + assert(FrameSize%4 == 0 && "Misaligned frame size"); + + FrameSize/=4; + + bool isU6 = isImmU6(FrameSize); + + if (!isU6 && !isImmU16(FrameSize)) { + // FIXME could emit multiple instructions. + cerr << "emitPrologue Frame size too big: " << FrameSize << "\n"; + abort(); + } + bool emitFrameMoves = needsFrameMoves(MF); + + // Do we need to allocate space on the stack? + if (FrameSize) { + bool saveLR = XFI->getUsesLR(); + bool LRSavedOnEntry = false; + int Opcode; + if (saveLR && (MFI->getObjectOffset(XFI->getLRSpillSlot()) == 0)) { + Opcode = (isU6) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6; + MBB.addLiveIn(XCore::LR); + saveLR = false; + LRSavedOnEntry = true; + } else { + Opcode = (isU6) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6; + } + BuildMI(MBB, MBBI, TII.get(Opcode)).addImm(FrameSize); + + if (emitFrameMoves) { + std::vector &Moves = MMI->getFrameMoves(); + + // Show update of SP. 
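// A DBG_LABEL is emitted at this point and a MachineMove records the
// FrameSize*4 byte adjustment, so DWARF frame info can describe the
// extsp/entsp issued above.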
+ unsigned FrameLabelId = MMI->NextLabelID(); + BuildMI(MBB, MBBI, TII.get(XCore::DBG_LABEL)).addImm(FrameLabelId); + + MachineLocation SPDst(MachineLocation::VirtualFP); + MachineLocation SPSrc(MachineLocation::VirtualFP, -FrameSize * 4); + Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc)); + + if (LRSavedOnEntry) { + MachineLocation CSDst(MachineLocation::VirtualFP, 0); + MachineLocation CSSrc(XCore::LR); + Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc)); + } + } + if (saveLR) { + int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot()); + storeToStack(MBB, MBBI, XCore::LR, LRSpillOffset + FrameSize*4); + MBB.addLiveIn(XCore::LR); + + if (emitFrameMoves) { + unsigned SaveLRLabelId = MMI->NextLabelID(); + BuildMI(MBB, MBBI, TII.get(XCore::DBG_LABEL)).addImm(SaveLRLabelId); + MachineLocation CSDst(MachineLocation::VirtualFP, LRSpillOffset); + MachineLocation CSSrc(XCore::LR); + MMI->getFrameMoves().push_back(MachineMove(SaveLRLabelId, + CSDst, CSSrc)); + } + } + } + + if (FP) { + // Save R10 to the stack. + int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot()); + storeToStack(MBB, MBBI, XCore::R10, FPSpillOffset + FrameSize*4); + // R10 is live-in. It is killed at the spill. + MBB.addLiveIn(XCore::R10); + if (emitFrameMoves) { + unsigned SaveR10LabelId = MMI->NextLabelID(); + BuildMI(MBB, MBBI, TII.get(XCore::DBG_LABEL)).addImm(SaveR10LabelId); + MachineLocation CSDst(MachineLocation::VirtualFP, FPSpillOffset); + MachineLocation CSSrc(XCore::R10); + MMI->getFrameMoves().push_back(MachineMove(SaveR10LabelId, + CSDst, CSSrc)); + } + // Set the FP from the SP. + unsigned FramePtr = XCore::R10; + BuildMI(MBB, MBBI, TII.get(XCore::LDAWSP_ru6), FramePtr) + .addImm(0) + .addImm(0); + if (emitFrameMoves) { + // Show FP is now valid. + unsigned FrameLabelId = MMI->NextLabelID(); + BuildMI(MBB, MBBI, TII.get(XCore::DBG_LABEL)).addImm(FrameLabelId); + MachineLocation SPDst(FramePtr); + MachineLocation SPSrc(MachineLocation::VirtualFP); + MMI->getFrameMoves().push_back(MachineMove(FrameLabelId, SPDst, SPSrc)); + } + } + + if (emitFrameMoves) { + // Frame moves for callee saved. + std::vector &Moves = MMI->getFrameMoves(); + std::vector >&SpillLabels = + XFI->getSpillLabels(); + for (unsigned I = 0, E = SpillLabels.size(); I != E; ++I) { + unsigned SpillLabel = SpillLabels[I].first; + CalleeSavedInfo &CSI = SpillLabels[I].second; + int Offset = MFI->getObjectOffset(CSI.getFrameIdx()); + unsigned Reg = CSI.getReg(); + MachineLocation CSDst(MachineLocation::VirtualFP, Offset); + MachineLocation CSSrc(Reg); + Moves.push_back(MachineMove(SpillLabel, CSDst, CSSrc)); + } + } +} + +void XCoreRegisterInfo::emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + MachineFrameInfo *MFI = MF.getFrameInfo(); + MachineBasicBlock::iterator MBBI = prior(MBB.end()); + + bool FP = hasFP(MF); + + if (FP) { + // Restore the stack pointer. + unsigned FramePtr = XCore::R10; + BuildMI(MBB, MBBI, TII.get(XCore::SETSP_1r)) + .addReg(FramePtr); + } + + // Work out frame sizes. + int FrameSize = MFI->getStackSize(); + + assert(FrameSize%4 == 0 && "Misaligned frame size"); + + FrameSize/=4; + + bool isU6 = isImmU6(FrameSize); + + if (!isU6 && !isImmU16(FrameSize)) { + // FIXME could emit multiple instructions. 
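// (the same u6/u16 limit as in emitPrologue: frames larger than 2^16
// words are rejected outright for now)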
+ cerr << "emitEpilogue Frame size too big: " << FrameSize << "\n"; + abort(); + } + + if (FrameSize) { + XCoreFunctionInfo *XFI = MF.getInfo(); + + if (FP) { + // Restore R10 + int FPSpillOffset = MFI->getObjectOffset(XFI->getFPSpillSlot()); + FPSpillOffset += FrameSize*4; + loadFromStack(MBB, MBBI, XCore::R10, FPSpillOffset); + } + bool restoreLR = XFI->getUsesLR(); + if (restoreLR && MFI->getObjectOffset(XFI->getLRSpillSlot()) != 0) { + int LRSpillOffset = MFI->getObjectOffset(XFI->getLRSpillSlot()); + LRSpillOffset += FrameSize*4; + loadFromStack(MBB, MBBI, XCore::LR, LRSpillOffset); + restoreLR = false; + } + if (restoreLR) { + // Fold prologue into return instruction + assert(MBBI->getOpcode() == XCore::RETSP_u6 + || MBBI->getOpcode() == XCore::RETSP_lu6); + int Opcode = (isU6) ? XCore::RETSP_u6 : XCore::RETSP_lu6; + BuildMI(MBB, MBBI, TII.get(Opcode)).addImm(FrameSize); + MBB.erase(MBBI); + } else { + int Opcode = (isU6) ? XCore::LDAWSP_ru6_RRegs : XCore::LDAWSP_lru6_RRegs; + BuildMI(MBB, MBBI, TII.get(Opcode), XCore::SP).addImm(FrameSize); + } + } +} + +int XCoreRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const { + return XCoreGenRegisterInfo::getDwarfRegNumFull(RegNum, 0); +} + +unsigned XCoreRegisterInfo::getFrameRegister(MachineFunction &MF) const { + bool FP = hasFP(MF); + + return FP ? XCore::R10 : XCore::SP; +} + +unsigned XCoreRegisterInfo::getRARegister() const { + return XCore::LR; +} + +void XCoreRegisterInfo::getInitialFrameState(std::vector &Moves) + const { + // Initial state of the frame pointer is SP. + MachineLocation Dst(MachineLocation::VirtualFP); + MachineLocation Src(XCore::SP, 0); + Moves.push_back(MachineMove(0, Dst, Src)); +} + +#include "XCoreGenRegisterInfo.inc" + diff --git a/llvm/lib/Target/XCore/XCoreRegisterInfo.h b/llvm/lib/Target/XCore/XCoreRegisterInfo.h new file mode 100644 index 000000000000..dd4dcc703bdc --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreRegisterInfo.h @@ -0,0 +1,94 @@ +//===- XCoreRegisterInfo.h - XCore Register Information Impl ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the XCore implementation of the MRegisterInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef XCOREREGISTERINFO_H +#define XCOREREGISTERINFO_H + +#include "llvm/Target/TargetRegisterInfo.h" +#include "XCoreGenRegisterInfo.h.inc" + +namespace llvm { + +class TargetInstrInfo; + +struct XCoreRegisterInfo : public XCoreGenRegisterInfo { +private: + const TargetInstrInfo &TII; + + void loadConstant(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DstReg, int64_t Value) const; + + void storeToStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned SrcReg, int Offset) const; + + void loadFromStack(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DstReg, int Offset) const; + +public: + XCoreRegisterInfo(const TargetInstrInfo &tii); + + /// Code Generation virtual methods... 
+ + const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const; + + const TargetRegisterClass* const* getCalleeSavedRegClasses( + const MachineFunction *MF = 0) const; + + BitVector getReservedRegs(const MachineFunction &MF) const; + + bool requiresRegisterScavenging(const MachineFunction &MF) const; + + bool hasFP(const MachineFunction &MF) const; + + void eliminateCallFramePseudoInstr(MachineFunction &MF, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const; + + void eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, RegScavenger *RS = NULL) const; + + void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, + RegScavenger *RS = NULL) const; + + void processFunctionBeforeFrameFinalized(MachineFunction &MF) const; + + void emitPrologue(MachineFunction &MF) const; + void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const; + + // Debug information queries. + unsigned getRARegister() const; + unsigned getFrameRegister(MachineFunction &MF) const; + void getInitialFrameState(std::vector &Moves) const; + + //! Return the array of argument passing registers + /*! + \note The size of this array is returned by getArgRegsSize(). + */ + static const unsigned *getArgRegs(const MachineFunction *MF = 0); + + //! Return the size of the argument passing register array + static unsigned getNumArgRegs(const MachineFunction *MF = 0); + + //! Return whether to emit frame moves + static bool needsFrameMoves(const MachineFunction &MF); + + //! Get DWARF debugging register number + int getDwarfRegNum(unsigned RegNum, bool isEH) const; +}; + +} // end namespace llvm + +#endif diff --git a/llvm/lib/Target/XCore/XCoreRegisterInfo.td b/llvm/lib/Target/XCore/XCoreRegisterInfo.td new file mode 100644 index 000000000000..62daf5d4567b --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreRegisterInfo.td @@ -0,0 +1,91 @@ +//===- XCoreRegisterInfo.td - XCore Register defs ----------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Declarations that describe the XCore register file +//===----------------------------------------------------------------------===// + +class XCoreReg : Register { + field bits<4> Num; + let Namespace = "XCore"; +} + +// Registers are identified with 4-bit ID numbers. +// Ri - 32-bit integer registers +class Ri num, string n> : XCoreReg { + let Num = num; +} + +// CPU registers +def R0 : Ri< 0, "r0">, DwarfRegNum<[0]>; +def R1 : Ri< 1, "r1">, DwarfRegNum<[1]>; +def R2 : Ri< 2, "r2">, DwarfRegNum<[2]>; +def R3 : Ri< 3, "r3">, DwarfRegNum<[3]>; +def R4 : Ri< 4, "r4">, DwarfRegNum<[4]>; +def R5 : Ri< 5, "r5">, DwarfRegNum<[5]>; +def R6 : Ri< 6, "r6">, DwarfRegNum<[6]>; +def R7 : Ri< 7, "r7">, DwarfRegNum<[7]>; +def R8 : Ri< 8, "r8">, DwarfRegNum<[8]>; +def R9 : Ri< 9, "r9">, DwarfRegNum<[9]>; +def R10 : Ri<10, "r10">, DwarfRegNum<[10]>; +def R11 : Ri<11, "r11">, DwarfRegNum<[11]>; +def CP : Ri<12, "cp">, DwarfRegNum<[12]>; +def DP : Ri<13, "dp">, DwarfRegNum<[13]>; +def SP : Ri<14, "sp">, DwarfRegNum<[14]>; +def LR : Ri<15, "lr">, DwarfRegNum<[15]>; + +// Register classes. 
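// GRRegs is the allocatable class; R10 is dropped from its allocation order
// when it doubles as the frame pointer. RRegs holds the reserved cp, dp, sp
// and lr registers and is never allocated.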
+// +def GRRegs : RegisterClass<"XCore", [i32], 32, + // Return values and arguments + [R0, R1, R2, R3, + // Not preserved across procedure calls + R11, + // Callee save + R4, R5, R6, R7, R8, R9, R10]> { + let MethodProtos = [{ + iterator allocation_order_begin(const MachineFunction &MF) const; + iterator allocation_order_end(const MachineFunction &MF) const; + }]; + let MethodBodies = [{ + GRRegsClass::iterator + GRRegsClass::allocation_order_begin(const MachineFunction &MF) const { + return begin(); + } + GRRegsClass::iterator + GRRegsClass::allocation_order_end(const MachineFunction &MF) const { + const TargetMachine &TM = MF.getTarget(); + const TargetRegisterInfo *RI = TM.getRegisterInfo(); + if (RI->hasFP(MF)) + return end()-1; // don't allocate R10 + else + return end(); + } + }]; +} + +def RRegs : RegisterClass<"XCore", [i32], 32, + // Reserved + [CP, DP, SP, LR]> { + let MethodProtos = [{ + iterator allocation_order_begin(const MachineFunction &MF) const; + iterator allocation_order_end(const MachineFunction &MF) const; + }]; + let MethodBodies = [{ + RRegsClass::iterator + RRegsClass::allocation_order_begin(const MachineFunction &MF) const { + return begin(); + } + RRegsClass::iterator + RRegsClass::allocation_order_end(const MachineFunction &MF) const { + // No allocatable registers + return begin(); + } + }]; +} diff --git a/llvm/lib/Target/XCore/XCoreSubtarget.cpp b/llvm/lib/Target/XCore/XCoreSubtarget.cpp new file mode 100644 index 000000000000..dc53da4ddf0b --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreSubtarget.cpp @@ -0,0 +1,28 @@ +//===- XCoreSubtarget.cpp - XCore Subtarget Information -----------*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the XCore specific subclass of TargetSubtarget. +// +//===----------------------------------------------------------------------===// + +#include "XCoreSubtarget.h" +#include "XCore.h" +#include "XCoreGenSubtarget.inc" +using namespace llvm; + +XCoreSubtarget::XCoreSubtarget(const TargetMachine &TM, const Module &M, + const std::string &FS) + : IsXS1A(false), + IsXS1B(false) +{ + std::string CPU = "xs1a-generic"; + + // Parse features string. + ParseSubtargetFeatures(FS, CPU); +} diff --git a/llvm/lib/Target/XCore/XCoreSubtarget.h b/llvm/lib/Target/XCore/XCoreSubtarget.h new file mode 100644 index 000000000000..779018f3fce1 --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreSubtarget.h @@ -0,0 +1,45 @@ +//=====-- XCoreSubtarget.h - Define Subtarget for the XCore -----*- C++ -*--==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the XCore specific subclass of TargetSubtarget. +// +//===----------------------------------------------------------------------===// + +#ifndef XCORESUBTARGET_H +#define XCORESUBTARGET_H + +#include "llvm/Target/TargetSubtarget.h" +#include "llvm/Target/TargetMachine.h" + +#include + +namespace llvm { +class Module; + +class XCoreSubtarget : public TargetSubtarget { + bool IsXS1A; + bool IsXS1B; + +public: + /// This constructor initializes the data members to match that + /// of the specified module. 
+ /// + XCoreSubtarget(const TargetMachine &TM, const Module &M, + const std::string &FS); + + bool isXS1A() const { return IsXS1A; } + bool isXS1B() const { return IsXS1B; } + + /// ParseSubtargetFeatures - Parses features string setting specified + /// subtarget options. Definition of function is auto generated by tblgen. + void ParseSubtargetFeatures(const std::string &FS, const std::string &CPU); +}; +} // End llvm namespace + +#endif diff --git a/llvm/lib/Target/XCore/XCoreTargetAsmInfo.cpp b/llvm/lib/Target/XCore/XCoreTargetAsmInfo.cpp new file mode 100644 index 000000000000..e2fabe2a9865 --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreTargetAsmInfo.cpp @@ -0,0 +1,201 @@ +//===-- XCoreTargetAsmInfo.cpp - XCore asm properties -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declarations of the XCoreTargetAsmInfo properties. +// We use the small section flag for the CP relative and DP relative +// flags. If a section is small and writable then it is DP relative. If a +// section is small and not writable then it is CP relative. +// +//===----------------------------------------------------------------------===// + +#include "XCoreTargetAsmInfo.h" +#include "XCoreTargetMachine.h" +#include "llvm/GlobalVariable.h" +#include "llvm/ADT/StringExtras.h" + +using namespace llvm; + +XCoreTargetAsmInfo::XCoreTargetAsmInfo(const XCoreTargetMachine &TM) + : ELFTargetAsmInfo(TM), + Subtarget(TM.getSubtargetImpl()) { + TextSection = getUnnamedSection("\t.text", SectionFlags::Code); + DataSection = getNamedSection("\t.dp.data", SectionFlags::Writeable | + SectionFlags::Small); + BSSSection_ = getNamedSection("\t.dp.bss", SectionFlags::Writeable | + SectionFlags::BSS | SectionFlags::Small); + if (Subtarget->isXS1A()) { + ReadOnlySection = getNamedSection("\t.dp.rodata", SectionFlags::None | + SectionFlags::Writeable | + SectionFlags::Small); + } else { + ReadOnlySection = getNamedSection("\t.cp.rodata", SectionFlags::None | + SectionFlags::Small); + } + Data16bitsDirective = "\t.short\t"; + Data32bitsDirective = "\t.long\t"; + Data64bitsDirective = 0; + ZeroDirective = "\t.space\t"; + CommentString = "#"; + ConstantPoolSection = "\t.section\t.cp.rodata,\"ac\",@progbits"; + JumpTableDataSection = "\t.section\t.dp.data,\"awd\",@progbits"; + PrivateGlobalPrefix = ".L"; + AscizDirective = ".asciiz"; + WeakDefDirective = "\t.weak\t"; + WeakRefDirective = "\t.weak\t"; + SetDirective = "\t.set\t"; + + // Debug + HasLEB128 = true; + AbsoluteDebugSectionOffsets = true; + + DwarfAbbrevSection = "\t.section\t.debug_abbrev,\"\",@progbits"; + DwarfInfoSection = "\t.section\t.debug_info,\"\",@progbits"; + DwarfLineSection = "\t.section\t.debug_line,\"\",@progbits"; + DwarfFrameSection = "\t.section\t.debug_frame,\"\",@progbits"; + DwarfPubNamesSection = "\t.section\t.debug_pubnames,\"\",@progbits"; + DwarfPubTypesSection = "\t.section\t.debug_pubtypes,\"\",@progbits"; + DwarfStrSection = "\t.section\t.debug_str,\"\",@progbits"; + DwarfLocSection = "\t.section\t.debug_loc,\"\",@progbits"; + DwarfARangesSection = "\t.section\t.debug_aranges,\"\",@progbits"; + DwarfRangesSection = "\t.section\t.debug_ranges,\"\",@progbits"; + DwarfMacInfoSection = "\t.section\t.debug_macinfo,\"\",@progbits"; +} + +const Section* +XCoreTargetAsmInfo::SelectSectionForGlobal(const 
GlobalValue *GV) const { + SectionKind::Kind Kind = SectionKindForGlobal(GV); + + if (const GlobalVariable *GVar = dyn_cast(GV)) + { + if (!GVar->mayBeOverridden()) { + switch (Kind) { + case SectionKind::RODataMergeStr: + return MergeableStringSection(GVar); + case SectionKind::RODataMergeConst: + return getReadOnlySection(); + case SectionKind::ThreadData: + return DataSection; + case SectionKind::ThreadBSS: + return getBSSSection_(); + default: + break; + } + } + } + return ELFTargetAsmInfo::SelectSectionForGlobal(GV); +} + +const Section* +XCoreTargetAsmInfo::SelectSectionForMachineConst(const Type *Ty) const { + return MergeableConstSection(Ty); +} + +const Section* +XCoreTargetAsmInfo::MergeableConstSection(const GlobalVariable *GV) const { + Constant *C = GV->getInitializer(); + return MergeableConstSection(C->getType()); +} + +inline const Section* +XCoreTargetAsmInfo::MergeableConstSection(const Type *Ty) const { + const TargetData *TD = TM.getTargetData(); + + unsigned Size = TD->getABITypeSize(Ty); + if (Size == 4 || Size == 8 || Size == 16) { + std::string Name = ".cp.const" + utostr(Size); + + return getNamedSection(Name.c_str(), + SectionFlags::setEntitySize(SectionFlags::Mergeable | + SectionFlags::Small, + Size)); + } + + return getReadOnlySection(); +} + +const Section* XCoreTargetAsmInfo:: +MergeableStringSection(const GlobalVariable *GV) const { + // FIXME insert in correct mergable section + return getReadOnlySection(); +} + +unsigned XCoreTargetAsmInfo:: +SectionFlagsForGlobal(const GlobalValue *GV, + const char* Name) const { + unsigned Flags = ELFTargetAsmInfo::SectionFlagsForGlobal(GV, Name); + // Mask out unsupported flags + Flags &= ~(SectionFlags::Small | SectionFlags::TLS); + + // Set CP / DP relative flags + if (GV) { + SectionKind::Kind Kind = SectionKindForGlobal(GV); + switch (Kind) { + case SectionKind::ThreadData: + case SectionKind::ThreadBSS: + case SectionKind::Data: + case SectionKind::BSS: + case SectionKind::SmallData: + case SectionKind::SmallBSS: + Flags |= SectionFlags::Small; + break; + case SectionKind::ROData: + case SectionKind::RODataMergeStr: + case SectionKind::SmallROData: + if (Subtarget->isXS1A()) { + Flags |= SectionFlags::Writeable; + } + Flags |=SectionFlags::Small; + break; + case SectionKind::RODataMergeConst: + Flags |=SectionFlags::Small; + default: + break; + } + } + + return Flags; +} + +std::string XCoreTargetAsmInfo:: +printSectionFlags(unsigned flags) const { + std::string Flags = ",\""; + + if (!(flags & SectionFlags::Debug)) + Flags += 'a'; + if (flags & SectionFlags::Code) + Flags += 'x'; + if (flags & SectionFlags::Writeable) + Flags += 'w'; + if (flags & SectionFlags::Mergeable) + Flags += 'M'; + if (flags & SectionFlags::Strings) + Flags += 'S'; + if (flags & SectionFlags::TLS) + Flags += 'T'; + if (flags & SectionFlags::Small) { + if (flags & SectionFlags::Writeable) + Flags += 'd'; // DP relative + else + Flags += 'c'; // CP relative + } + + Flags += "\","; + + Flags += '@'; + + if (flags & SectionFlags::BSS) + Flags += "nobits"; + else + Flags += "progbits"; + + if (unsigned entitySize = SectionFlags::getEntitySize(flags)) + Flags += "," + utostr(entitySize); + + return Flags; +} diff --git a/llvm/lib/Target/XCore/XCoreTargetAsmInfo.h b/llvm/lib/Target/XCore/XCoreTargetAsmInfo.h new file mode 100644 index 000000000000..79fd36aa23ce --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreTargetAsmInfo.h @@ -0,0 +1,45 @@ +//=====-- XCoreTargetAsmInfo.h - XCore asm properties ---------*- C++ -*--====// +// +// The LLVM Compiler 
Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the XCoreTargetAsmInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef XCORETARGETASMINFO_H +#define XCORETARGETASMINFO_H + +#include "llvm/Target/ELFTargetAsmInfo.h" + +namespace llvm { + + // Forward declarations. + class XCoreTargetMachine; + class XCoreSubtarget; + + class XCoreTargetAsmInfo : public ELFTargetAsmInfo { + private: + const XCoreSubtarget *Subtarget; + public: + explicit XCoreTargetAsmInfo(const XCoreTargetMachine &TM); + + virtual const Section* SelectSectionForGlobal(const GlobalValue *GV) const; + virtual std::string printSectionFlags(unsigned flags) const; + const Section* MergeableConstSection(const GlobalVariable *GV) const; + inline const Section* MergeableConstSection(const Type *Ty) const; + const Section* MergeableStringSection(const GlobalVariable *GV) const; + virtual const Section* + SelectSectionForMachineConst(const Type *Ty) const; + virtual unsigned + SectionFlagsForGlobal(const GlobalValue *GV = NULL, + const char* name = NULL) const; + }; + +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp new file mode 100644 index 000000000000..c6b9c78492ab --- /dev/null +++ b/llvm/lib/Target/XCore/XCoreTargetMachine.cpp @@ -0,0 +1,60 @@ +//===-- XCoreTargetMachine.cpp - Define TargetMachine for XCore -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +#include "XCoreTargetAsmInfo.h" +#include "XCoreTargetMachine.h" +#include "XCore.h" +#include "llvm/Module.h" +#include "llvm/PassManager.h" +#include "llvm/Target/TargetMachineRegistry.h" +using namespace llvm; + +namespace { + // Register the target. + RegisterTarget X("xcore", " XCore"); +} + +const TargetAsmInfo *XCoreTargetMachine::createTargetAsmInfo() const { + return new XCoreTargetAsmInfo(*this); +} + +/// XCoreTargetMachine ctor - Create an ILP32 architecture model +/// +XCoreTargetMachine::XCoreTargetMachine(const Module &M, const std::string &FS) + : Subtarget(*this, M, FS), + DataLayout("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-" + "i16:16:32-i32:32:32-i64:32:32"), + InstrInfo(), + FrameInfo(*this), + TLInfo(*this) { +} + +unsigned XCoreTargetMachine::getModuleMatchQuality(const Module &M) { + std::string TT = M.getTargetTriple(); + if (TT.size() >= 6 && std::string(TT.begin(), TT.begin()+6) == "xcore-") + return 20; + + // Otherwise we don't match. + return 0; +} + +bool XCoreTargetMachine::addInstSelector(PassManagerBase &PM, bool Fast) { + PM.add(createXCoreISelDag(*this)); + return false; +} + +bool XCoreTargetMachine::addAssemblyEmitter(PassManagerBase &PM, bool Fast, + raw_ostream &Out) { + // Output assembly language. 
+  PM.add(createXCoreCodePrinterPass(Out, *this));
+  return false;
+}
diff --git a/llvm/lib/Target/XCore/XCoreTargetMachine.h b/llvm/lib/Target/XCore/XCoreTargetMachine.h
new file mode 100644
index 000000000000..4fa200a66a48
--- /dev/null
+++ b/llvm/lib/Target/XCore/XCoreTargetMachine.h
@@ -0,0 +1,62 @@
+//===-- XCoreTargetMachine.h - Define TargetMachine for XCore ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the XCore specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef XCORETARGETMACHINE_H
+#define XCORETARGETMACHINE_H
+
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+#include "XCoreFrameInfo.h"
+#include "XCoreSubtarget.h"
+#include "XCoreInstrInfo.h"
+#include "XCoreISelLowering.h"
+
+namespace llvm {
+
+class Module;
+
+class XCoreTargetMachine : public LLVMTargetMachine {
+  XCoreSubtarget Subtarget;
+  const TargetData DataLayout;       // Calculates type size & alignment
+  XCoreInstrInfo InstrInfo;
+  XCoreFrameInfo FrameInfo;
+  XCoreTargetLowering TLInfo;
+
+protected:
+  virtual const TargetAsmInfo *createTargetAsmInfo() const;
+
+public:
+  XCoreTargetMachine(const Module &M, const std::string &FS);
+
+  virtual const XCoreInstrInfo *getInstrInfo() const { return &InstrInfo; }
+  virtual const XCoreFrameInfo *getFrameInfo() const { return &FrameInfo; }
+  virtual const XCoreSubtarget *getSubtargetImpl() const { return &Subtarget; }
+  virtual XCoreTargetLowering *getTargetLowering() const {
+    return const_cast<XCoreTargetLowering*>(&TLInfo);
+  }
+
+  virtual const TargetRegisterInfo *getRegisterInfo() const {
+    return &InstrInfo.getRegisterInfo();
+  }
+  virtual const TargetData *getTargetData() const { return &DataLayout; }
+  static unsigned getModuleMatchQuality(const Module &M);
+
+  // Pass Pipeline Configuration
+  virtual bool addInstSelector(PassManagerBase &PM, bool Fast);
+  virtual bool addAssemblyEmitter(PassManagerBase &PM, bool Fast,
+                                  raw_ostream &Out);
+};
+
+} // end namespace llvm
+
+#endif
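The DataLayout string handed to this TargetMachine ("e-p:32:32:32-...-i64:32:32") pins down the ABI the rest of the patch assumes: little endian, 32-bit pointers, and 64-bit types aligned to only 32 bits. A small illustration of what the i64:32:32 entry implies for aggregate layout; the offsets are hand-computed from my reading of the string (using the same round-up idiom as eliminateCallFramePseudoInstr), not something verified against the backend:

#include <cstdio>

int main() {
  // Layout of { i32, i64 } when i64 ABI alignment is 4 bytes (i64:32:32):
  const unsigned I64Align = 4;
  unsigned Offset = 4;                               // i32 field occupies [0,4)
  Offset = (Offset + I64Align - 1) / I64Align * I64Align;
  std::printf("i64 field offset = %u\n", Offset);    // prints 4, not 8
  return 0;
}

Once the configure changes at the top of this patch are in place, the newly registered "xcore" target name should make the backend reachable via llc -march=xcore.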