Now that predicates can be composed, simplify several of the predicates
by extending simple predicates to create more complex predicates instead
of duplicating the logic for the simple predicates.

This doesn't reduce much redundancy in DAGISelEmitter.cpp's generated
source yet; that will require improvements to DAGISelEmitter.cpp's
instruction sorting, to make it group nodes with similar predicates
together more effectively.

llvm-svn: 57565
commit 29ad439782 (parent 1f71a69cb9)
Author: Dan Gohman
Date: 2008-10-15 06:50:19 +00:00
3 changed files with 124 additions and 194 deletions
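For readers unfamiliar with composed PatFrags, here is a minimal sketch (an
editorial illustration, not part of the commit) of the pattern the diff below
adopts. Roughly, when a PatFrag's pattern refers to another PatFrag rather
than to the raw SDNode, the inner fragment is expanded at the use site, so a
use of the outer fragment requires both predicate bodies to hold. The
fragment names and predicate code are copied from the new definitions in the
first file of the diff:

    // Base predicate: any unindexed load.
    def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
      return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
    }]>;

    // Refinement: an unindexed, any-extending load. The addressing-mode
    // check is inherited from unindexedload instead of being restated.
    def extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
      return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
    }]>;

    // Further refinement: an any-extending load of an i8 in memory, so a
    // pattern that uses extloadi8 implies all three checks.
    def extloadi8 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
      return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
    }]>;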


@@ -573,227 +573,177 @@ def vnot_conv : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV_bc)>;
 def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>;
 // load fragments.
-def load : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED;
+def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
 }]>;
+def load : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
+}]>;
 // extending load fragments.
-def extloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i1;
+def extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
 }]>;
-def extloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i8;
+def sextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
 }]>;
-def extloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i16;
-}]>;
-def extloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i32;
-}]>;
-def extloadf32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::f32;
-}]>;
-def extloadf64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::f64;
+def zextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
 }]>;
-def sextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::SEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i1;
+def extloadi1 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
 }]>;
-def sextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::SEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i8;
+def extloadi8 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
 }]>;
-def sextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::SEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i16;
+def extloadi16 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
 }]>;
-def sextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::SEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i32;
+def extloadi32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
+def extloadf32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32;
+}]>;
+def extloadf64 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64;
+}]>;
-def zextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::ZEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i1;
+def sextloadi1 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
 }]>;
-def zextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::ZEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i8;
+def sextloadi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
 }]>;
-def zextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::ZEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i16;
+def sextloadi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
 }]>;
-def zextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::ZEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i32;
+def sextloadi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
+def zextloadi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+def zextloadi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
 // store fragments.
+def unindexedstore : PatFrag<(ops node:$val, node:$ptr),
+                             (st node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+}]>;
 def store : PatFrag<(ops node:$val, node:$ptr),
-                    (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return !ST->isTruncatingStore() &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                    (unindexedstore node:$val, node:$ptr), [{
+  return !cast<StoreSDNode>(N)->isTruncatingStore();
 }]>;
 // truncstore fragments.
+def truncstore : PatFrag<(ops node:$val, node:$ptr),
+                         (unindexedstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->isTruncatingStore();
+}]>;
 def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
-                           (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                           (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
 }]>;
 def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
-                            (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
 }]>;
 def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
-                            (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
 def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
-                            (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
 }]>;
 def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
-                            (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f64 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64;
 }]>;
 // indexed store fragments.
-def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
-                        (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         !ST->isTruncatingStore();
+def istore : PatFrag<(ops node:$val, node:$base, node:$offset),
+                     (ist node:$val, node:$base, node:$offset), [{
+  return !cast<StoreSDNode>(N)->isTruncatingStore();
 }]>;
+def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
+                        (istore node:$val, node:$base, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
+}]>;
+def itruncstore : PatFrag<(ops node:$val, node:$base, node:$offset),
+                          (ist node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->isTruncatingStore();
+}]>;
+def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
+                          (itruncstore node:$val, node:$base, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
+}]>;
 def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                            (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
+                            (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
 }]>;
 def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                            (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
+                            (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
 }]>;
 def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
+                             (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
 }]>;
 def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
+                             (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
 def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
+                             (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
 }]>;
 def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
-                         (ist node:$val, node:$ptr, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return !ST->isTruncatingStore() &&
-         (AM == ISD::POST_INC || AM == ISD::POST_DEC);
+                         (istore node:$val, node:$ptr, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::POST_INC || AM == ISD::POST_DEC;
 }]>;
+def post_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
+                           (itruncstore node:$val, node:$base, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::POST_INC || AM == ISD::POST_DEC;
+}]>;
 def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
+                             (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
 }]>;
 def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
+                             (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
 }]>;
 def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                              (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
+                              (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
 }]>;
 def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                              (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
+                              (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
 def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                              (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
+                              (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
 }]>;
 // setcc convenience fragments.


@@ -254,10 +254,8 @@ def i32immSExt8 : PatLeaf<(i32 imm), [{
 // Helper fragments for loads.
 // It's always safe to treat a anyext i16 load as a i32 load if the i16 is
 // known to be 32-bit aligned or better. Ditto for i8 to i16.
-def loadi16 : PatFrag<(ops node:$ptr), (i16 (ld node:$ptr)), [{
+def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
   LoadSDNode *LD = cast<LoadSDNode>(N);
-  if (LD->getAddressingMode() != ISD::UNINDEXED)
-    return false;
   ISD::LoadExtType ExtType = LD->getExtensionType();
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
@@ -266,20 +264,16 @@ def loadi16 : PatFrag<(ops node:$ptr), (i16 (ld node:$ptr)), [{
   return false;
 }]>;
-def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
+def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
   LoadSDNode *LD = cast<LoadSDNode>(N);
-  if (LD->getAddressingMode() != ISD::UNINDEXED)
-    return false;
   ISD::LoadExtType ExtType = LD->getExtensionType();
   if (ExtType == ISD::EXTLOAD)
     return LD->getAlignment() >= 2 && !LD->isVolatile();
   return false;
 }]>;
-def loadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
+def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
   LoadSDNode *LD = cast<LoadSDNode>(N);
-  if (LD->getAddressingMode() != ISD::UNINDEXED)
-    return false;
   ISD::LoadExtType ExtType = LD->getExtensionType();
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
@@ -288,12 +282,10 @@ def loadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
   return false;
 }]>;
-def nvloadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
+def nvloadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
   LoadSDNode *LD = cast<LoadSDNode>(N);
   if (LD->isVolatile())
     return false;
-  if (LD->getAddressingMode() != ISD::UNINDEXED)
-    return false;
   ISD::LoadExtType ExtType = LD->getExtensionType();
   if (ExtType == ISD::NON_EXTLOAD)
     return true;


@@ -98,19 +98,13 @@ def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
 // Like 'store', but always requires vector alignment.
 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
-                           (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return !ST->isTruncatingStore() &&
-         ST->getAddressingMode() == ISD::UNINDEXED &&
-         ST->getAlignment() >= 16;
+                           (store node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getAlignment() >= 16;
 }]>;
 // Like 'load', but always requires vector alignment.
-def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getAlignment() >= 16;
+def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+  return cast<LoadSDNode>(N)->getAlignment() >= 16;
 }]>;
 def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
@@ -125,11 +119,8 @@ def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>
 // be naturally aligned on some targets but not on others.
 // FIXME: Actually implement support for targets that don't require the
 // alignment. This probably wants a subtarget predicate.
-def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getAlignment() >= 16;
+def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+  return cast<LoadSDNode>(N)->getAlignment() >= 16;
 }]>;
 def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
@@ -143,11 +134,8 @@ def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
 // SSSE3 uses MMX registers for some instructions. They aren't aligned on a
 // 16-byte boundary.
 // FIXME: 8 byte alignment for mmx reads is not required
-def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getAlignment() >= 8;
+def memop64 : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getAlignment() >= 8;
 }]>;
 def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
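
As a closing illustration (not part of the commit), a hypothetical target
pattern shows how the composed fragments are consumed; SOME_ZEXTLOAD8 and
addr are made-up names standing in for a real instruction and an
addressing-mode ComplexPattern:

    // Hypothetical pattern: select an i32 zero-extending byte load.
    // zextloadi8 now implies "unindexed, zero-extending, i8 in memory"
    // without restating those checks at each use site.
    def : Pat<(i32 (zextloadi8 addr:$src)),
              (SOME_ZEXTLOAD8 addr:$src)>;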