Move function-dependent resetting of a subtarget variable out of the
subtarget. This involved having the movt predicate take the current
function: since instruction selection cares about code size when deciding
whether to use movw/movt, the predicate now takes the function so it can
check the attributes. This required adding the current MachineFunction to
FastISel and propagating it through.

llvm-svn: 212309
Eric Christopher 2014-07-04 01:55:26 +00:00
parent ac90380b5e
commit c1058df66f
10 changed files with 28 additions and 26 deletions
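
For orientation before the per-file diffs, here is a minimal, self-contained C++ sketch (toy types and names, not the real LLVM classes) of the pattern this commit adopts: rather than caching a per-function MinSize flag in the subtarget and resetting it whenever codegen switches functions, the movt predicate takes the current MachineFunction and checks the function's attributes on demand, so the subtarget holds no per-function state.

#include <set>
#include <string>

// Toy stand-ins for the attribute query; the real code goes through
// MF.getFunction()->getAttributes().hasAttribute(...).
struct Function {
  std::set<std::string> Attrs;
  bool hasFnAttribute(const std::string &Kind) const {
    return Attrs.count(Kind) != 0;
  }
};

struct MachineFunction {
  const Function *F = nullptr;
  const Function *getFunction() const { return F; }
};

struct SubtargetSketch {
  bool UseMovt = true;        // feature bit from the target description
  bool TargetWindows = false; // stand-in for isTargetWindows()

  bool isTargetWindows() const { return TargetWindows; }

  // Windows on ARM always needs movw/movt pairs to materialise 32-bit
  // immediates; elsewhere, skip the pair when the current function is
  // built for minimum size.
  bool useMovt(const MachineFunction &MF) const {
    return UseMovt && (isTargetWindows() ||
                       !MF.getFunction()->hasFnAttribute("minsize"));
  }
};

The real change threads the MachineFunction through FastISel and the TableGen predicates so that call sites can pass *MF, *FuncInfo.MF, or DAG.getMachineFunction(), as the diffs below show.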

View File

@@ -49,6 +49,7 @@ class FastISel {
 protected:
 DenseMap<const Value *, unsigned> LocalValueMap;
 FunctionLoweringInfo &FuncInfo;
+MachineFunction *MF;
 MachineRegisterInfo &MRI;
 MachineFrameInfo &MFI;
 MachineConstantPool &MCP;

View File

@@ -1210,6 +1210,7 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
 FastISel::FastISel(FunctionLoweringInfo &funcInfo,
 const TargetLibraryInfo *libInfo)
 : FuncInfo(funcInfo),
+MF(funcInfo.MF),
 MRI(FuncInfo.MF->getRegInfo()),
 MFI(*FuncInfo.MF->getFrameInfo()),
 MCP(*FuncInfo.MF->getConstantPool()),

View File

@@ -1887,7 +1887,8 @@ bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
 unsigned NumBytes) {
 // This optimisation potentially adds lots of load and store
 // micro-operations, it's only really a great benefit to code-size.
-if (!Subtarget.isMinSize())
+if (!MF.getFunction()->getAttributes().hasAttribute(
+AttributeSet::FunctionIndex, Attribute::MinSize))
 return false;
 // If only one register is pushed/popped, LLVM can use an LDR/STR

View File

@@ -590,7 +590,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
 // Use movw+movt when possible, it avoids constant pool entries.
 // Non-darwin targets only support static movt relocations in FastISel.
-if (Subtarget->useMovt() &&
+if (Subtarget->useMovt(*FuncInfo.MF) &&
 (Subtarget->isTargetMachO() || RelocM == Reloc::Static)) {
 unsigned Opc;
 unsigned char TF = 0;

View File

@@ -2439,7 +2439,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
 case ISD::Constant: {
 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
 bool UseCP = true;
-if (Subtarget->useMovt())
+if (Subtarget->useMovt(*MF))
 // Thumb2-aware targets have the MOVT instruction, so all immediates can
 // be done with MOV + MOVT, at worst.
 UseCP = false;

View File

@@ -1695,7 +1695,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 // FIXME: handle tail calls differently.
 unsigned CallOpc;
-bool HasMinSizeAttr = Subtarget->isMinSize();
+bool HasMinSizeAttr = MF.getFunction()->getAttributes().hasAttribute(
+AttributeSet::FunctionIndex, Attribute::MinSize);
 if (Subtarget->isThumb()) {
 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
 CallOpc = ARMISD::CALL_NOLINK;
@@ -2442,7 +2443,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
 // If we have T2 ops, we can materialize the address directly via movt/movw
 // pair. This is always cheaper.
-if (Subtarget->useMovt()) {
+if (Subtarget->useMovt(DAG.getMachineFunction())) {
 ++NumMovwMovt;
 // FIXME: Once remat is capable of dealing with instructions with register
 // operands, expand this into two nodes.
@@ -2464,7 +2465,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
-if (Subtarget->useMovt())
+if (Subtarget->useMovt(DAG.getMachineFunction()))
 ++NumMovwMovt;
 // FIXME: Once remat is capable of dealing with instructions with register
@@ -2484,7 +2485,8 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
 SelectionDAG &DAG) const {
 assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
-assert(Subtarget->useMovt() && "Windows on ARM expects to use movw/movt");
+assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
+"Windows on ARM expects to use movw/movt");
 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
 EVT PtrVT = getPointerTy();

View File

@@ -270,8 +270,8 @@ def UseNaClTrap : Predicate<"Subtarget->useNaClTrap()">,
 def DontUseNaClTrap : Predicate<"!Subtarget->useNaClTrap()">;
 // FIXME: Eventually this will be just "hasV6T2Ops".
-def UseMovt : Predicate<"Subtarget->useMovt()">;
-def DontUseMovt : Predicate<"!Subtarget->useMovt()">;
+def UseMovt : Predicate<"Subtarget->useMovt(*MF)">;
+def DontUseMovt : Predicate<"!Subtarget->useMovt(*MF)">;
 def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">;
 def UseMulOps : Predicate<"Subtarget->useMulOps()">;
@@ -594,7 +594,7 @@ def so_imm2part : PatLeaf<(imm), [{
 /// arm_i32imm - True for +V6T2, or true only if so_imm2part is true.
 ///
 def arm_i32imm : PatLeaf<(imm), [{
-if (Subtarget->useMovt())
+if (Subtarget->useMovt(*MF))
 return true;
 return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
 }]>;

View File

@@ -180,7 +180,6 @@ void ARMSubtarget::initializeEnvironment() {
 HasVFPv4 = false;
 HasFPARMv8 = false;
 HasNEON = false;
-MinSize = false;
 UseNEONForSinglePrecisionFP = false;
 UseMulOps = UseFusedMulOps;
 SlowFPVMLx = false;
@@ -231,9 +230,6 @@ void ARMSubtarget::resetSubtargetFeatures(const MachineFunction *MF) {
 initializeEnvironment();
 resetSubtargetFeatures(CPU, FS);
 }
-MinSize =
-FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
 }
 void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
@@ -444,3 +440,12 @@ bool ARMSubtarget::enablePostRAScheduler(
 Mode = TargetSubtargetInfo::ANTIDEP_NONE;
 return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
 }
+bool ARMSubtarget::useMovt(const MachineFunction &MF) const {
+// NOTE Windows on ARM needs to use mov.w/mov.t pairs to materialise 32-bit
+// immediates as it is inherently position independent, and may be out of
+// range otherwise.
+return UseMovt && (isTargetWindows() ||
+!MF.getFunction()->getAttributes().hasAttribute(
+AttributeSet::FunctionIndex, Attribute::MinSize));
+}

View File

@@ -76,10 +76,6 @@ protected:
 bool HasFPARMv8;
 bool HasNEON;
-/// MinSize - True if the function being compiled has the "minsize" attribute
-/// and should be optimised for size at the expense of speed.
-bool MinSize;
 /// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been
 /// specified. Use the method useNEONForSinglePrecisionFP() to
 /// determine if NEON should actually be used.
@@ -319,7 +315,6 @@ public:
 bool hasCrypto() const { return HasCrypto; }
 bool hasCRC() const { return HasCRC; }
 bool hasVirtualization() const { return HasVirtualization; }
-bool isMinSize() const { return MinSize; }
 bool useNEONForSinglePrecisionFP() const {
 return hasNEON() && UseNEONForSinglePrecisionFP; }
@@ -415,12 +410,8 @@ public:
 bool isR9Reserved() const { return IsR9Reserved; }
-bool useMovt() const {
-// NOTE Windows on ARM needs to use mov.w/mov.t pairs to materialise 32-bit
-// immediates as it is inherently position independent, and may be out of
-// range otherwise.
-return UseMovt && (isTargetWindows() || !isMinSize());
-}
+bool useMovt(const MachineFunction &MF) const;
 bool supportsTailCall() const { return SupportsTailCall; }
 bool allowsUnalignedMem() const { return AllowsUnalignedMem; }

View File

@@ -1010,7 +1010,8 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
 AttributeSet FnAttrs = MF.getFunction()->getAttributes();
 OptimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
 Attribute::OptimizeForSize);
-MinimizeSize = STI->isMinSize();
+MinimizeSize =
+FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
 BlockInfo.clear();
 BlockInfo.resize(MF.getNumBlockIDs());