For the ARM integrated assembler, add checking of the alignments on
vld/vst instructions and report errors for alignments that are not
supported.

While this is a large diff and a big test case, the changes are very
straightforward.  However, nearly every vld/vst instruction had to be
touched to switch its addrmode to one of the newly added ones, which do
the proper checking for the specific instruction (see the sketch below).
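
As an illustration only (the operand-class names come from this diff,
but the exact instruction definitions live in the suppressed file diffs
and may differ), the typical per-instruction change is just the address
operand:

    (ins addrmode6:$Rn)          // before: alignment parsed, never checked
    (ins addrmode6align64:$Rn)   // after: only ":64" (or no alignment) matches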

FYI, re-committing this with a tweak so MemoryOp's default
constructor is trivial and will work with MSVC 2012. Thanks
to Reid Kleckner and Jim Grosbach for help with the tweak.

rdar://11312406

llvm-svn: 205986
Kevin Enderby 2014-04-10 20:18:58 +00:00
parent 4f469d65cc
commit 488f20b64e
4 changed files with 9248 additions and 433 deletions


@@ -991,6 +991,81 @@ def addrmode6oneL32 : Operand<i32>,
let EncoderMethod = "getAddrMode6OneLane32AddressOpValue";
}
// Base class for addrmode6 with specific alignment restrictions.
class AddrMode6Align : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm:$align);
let EncoderMethod = "getAddrMode6AddressOpValue";
let DecoderMethod = "DecodeAddrMode6Operand";
}
// Special version of addrmode6 to handle no allowed alignment encoding for
// VLD/VST instructions and checking the alignment is not specified.
def AddrMode6AlignNoneAsmOperand : AsmOperandClass {
let Name = "AlignedMemoryNone";
let DiagnosticType = "AlignedMemoryRequiresNone";
}
def addrmode6alignNone : AddrMode6Align {
// The alignment specifier can only be omitted.
let ParserMatchClass = AddrMode6AlignNoneAsmOperand;
}
// Special version of addrmode6 to handle 16-bit alignment encoding for
// VLD/VST instructions and checking the alignment value.
def AddrMode6Align16AsmOperand : AsmOperandClass {
let Name = "AlignedMemory16";
let DiagnosticType = "AlignedMemoryRequires16";
}
def addrmode6align16 : AddrMode6Align {
// The alignment specifier can only be 16 or omitted.
let ParserMatchClass = AddrMode6Align16AsmOperand;
}
// Special version of addrmode6 to handle 32-bit alignment encoding for
// VLD/VST instructions and checking the alignment value.
def AddrMode6Align32AsmOperand : AsmOperandClass {
let Name = "AlignedMemory32";
let DiagnosticType = "AlignedMemoryRequires32";
}
def addrmode6align32 : AddrMode6Align {
// The alignment specifier can only be 32 or omitted.
let ParserMatchClass = AddrMode6Align32AsmOperand;
}
// Special version of addrmode6 to handle 64-bit alignment encoding for
// VLD/VST instructions and checking the alignment value.
def AddrMode6Align64AsmOperand : AsmOperandClass {
let Name = "AlignedMemory64";
let DiagnosticType = "AlignedMemoryRequires64";
}
def addrmode6align64 : AddrMode6Align {
// The alignment specifier can only be 64 or omitted.
let ParserMatchClass = AddrMode6Align64AsmOperand;
}
// Special version of addrmode6 to handle 64-bit or 128-bit alignment encoding
// for VLD/VST instructions and checking the alignment value.
def AddrMode6Align64or128AsmOperand : AsmOperandClass {
let Name = "AlignedMemory64or128";
let DiagnosticType = "AlignedMemoryRequires64or128";
}
def addrmode6align64or128 : AddrMode6Align {
// The alignment specifier can only be 64, 128 or omitted.
let ParserMatchClass = AddrMode6Align64or128AsmOperand;
}
// Special version of addrmode6 to handle 64-bit, 128-bit or 256-bit alignment
// encoding for VLD/VST instructions and checking the alignment value.
def AddrMode6Align64or128or256AsmOperand : AsmOperandClass {
let Name = "AlignedMemory64or128or256";
let DiagnosticType = "AlignedMemoryRequires64or128or256";
}
def addrmode6align64or128or256 : AddrMode6Align {
// The alignment specifier can only be 64, 128, 256 or omitted.
let ParserMatchClass = AddrMode6Align64or128or256AsmOperand;
}
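// Illustration only, not part of this commit's diff: an instruction
// definition picks whichever of the classes above matches the alignments
// the architecture allows for it, e.g. a hypothetical one-register VLD1
// variant that permits only 64-bit alignment would take
//
//   (ins addrmode6align64:$Rn)
//
// as its address operand, and the asm matcher then reports the
// "AlignedMemoryRequires64" diagnostic (handled in the asm parser changes
// further down) when any other alignment is written.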
// Special version of addrmode6 to handle alignment encoding for VLD-dup
// instructions, specifically VLD4-dup.
def addrmode6dup : Operand<i32>,
@@ -1003,6 +1078,69 @@ def addrmode6dup : Operand<i32>,
let ParserMatchClass = AddrMode6AsmOperand;
}
// Base class for addrmode6dup with specific alignment restrictions.
class AddrMode6DupAlign : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm);
let EncoderMethod = "getAddrMode6DupAddressOpValue";
}
// Special version of addrmode6 to handle no allowed alignment encoding for
// VLD-dup instruction and checking the alignment is not specified.
def AddrMode6dupAlignNoneAsmOperand : AsmOperandClass {
let Name = "DupAlignedMemoryNone";
let DiagnosticType = "DupAlignedMemoryRequiresNone";
}
def addrmode6dupalignNone : AddrMode6DupAlign {
// The alignment specifier can only be omitted.
let ParserMatchClass = AddrMode6dupAlignNoneAsmOperand;
}
// Special version of addrmode6 to handle 16-bit alignment encoding for VLD-dup
// instruction and checking the alignment value.
def AddrMode6dupAlign16AsmOperand : AsmOperandClass {
let Name = "DupAlignedMemory16";
let DiagnosticType = "DupAlignedMemoryRequires16";
}
def addrmode6dupalign16 : AddrMode6DupAlign {
// The alignment specifier can only be 16 or omitted.
let ParserMatchClass = AddrMode6dupAlign16AsmOperand;
}
// Special version of addrmode6 to handle 32-bit alignment encoding for VLD-dup
// instruction and checking the alignment value.
def AddrMode6dupAlign32AsmOperand : AsmOperandClass {
let Name = "DupAlignedMemory32";
let DiagnosticType = "DupAlignedMemoryRequires32";
}
def addrmode6dupalign32 : AddrMode6DupAlign {
// The alignment specifier can only be 32 or omitted.
let ParserMatchClass = AddrMode6dupAlign32AsmOperand;
}
// Special version of addrmode6 to handle 64-bit alignment encoding for VLD
// instructions and checking the alignment value.
def AddrMode6dupAlign64AsmOperand : AsmOperandClass {
let Name = "DupAlignedMemory64";
let DiagnosticType = "DupAlignedMemoryRequires64";
}
def addrmode6dupalign64 : AddrMode6DupAlign {
// The alignment specifier can only be 64 or omitted.
let ParserMatchClass = AddrMode6dupAlign64AsmOperand;
}
// Special version of addrmode6 to handle 64-bit or 128-bit alignment encoding
// for VLD instructions and checking the alignment value.
def AddrMode6dupAlign64or128AsmOperand : AsmOperandClass {
let Name = "DupAlignedMemory64or128";
let DiagnosticType = "DupAlignedMemoryRequires64or128";
}
def addrmode6dupalign64or128 : AddrMode6DupAlign {
// The alignment specifier can only be 64, 128 or omitted.
let ParserMatchClass = AddrMode6dupAlign64or128AsmOperand;
}
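// Illustration only, not part of this commit's diff: the dup flavours above
// are used the same way by the "all lanes" VLDn forms, e.g. a hypothetical
// VLD1 (all lanes) variant that permits only 16-bit alignment would take
//
//   (ins addrmode6dupalign16:$Rn)
//
// so any other written alignment produces the "DupAlignedMemoryRequires16"
// diagnostic.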
// addrmodepc := pc + reg
//
def addrmodepc : Operand<i32>,

File diff suppressed because it is too large.


@@ -416,7 +416,7 @@ class ARMOperand : public MCParsedAsmOperand {
k_Token
} Kind;
SMLoc StartLoc, EndLoc;
SMLoc StartLoc, EndLoc, AlignmentLoc;
SmallVector<unsigned, 8> Registers;
struct CCOp {
@@ -633,6 +633,12 @@ public:
/// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
/// getAlignmentLoc - Get the location of the Alignment token of this operand.
SMLoc getAlignmentLoc() const {
assert(Kind == k_Memory && "Invalid access!");
return AlignmentLoc;
}
ARMCC::CondCodes getCondCode() const {
assert(Kind == k_CondCode && "Invalid access!");
return CC.Val;
@@ -1089,12 +1095,12 @@ public:
bool isPostIdxReg() const {
return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false) const {
bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
if (!isMem())
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
(alignOK || Memory.Alignment == 0);
(alignOK || Memory.Alignment == Alignment);
}
bool isMemPCRelImm12() const {
if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
@@ -1110,6 +1116,65 @@ public:
bool isAlignedMemory() const {
return isMemNoOffset(true);
}
bool isAlignedMemoryNone() const {
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemoryNone() const {
return isMemNoOffset(false, 0);
}
bool isAlignedMemory16() const {
if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory16() const {
if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory32() const {
if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory32() const {
if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory64() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory64() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory64or128() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory64or128() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory64or128or256() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
return true;
if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
return true;
return isMemNoOffset(false, 0);
}
bool isAddrMode2() const {
if (!isMem() || Memory.Alignment != 0) return false;
// Check for register offset.
@@ -1926,6 +1991,50 @@ public:
Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
}
void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
addAlignedMemoryOperands(Inst, N);
}
void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
@@ -2523,7 +2632,8 @@ public:
unsigned ShiftImm,
unsigned Alignment,
bool isNegative,
SMLoc S, SMLoc E) {
SMLoc S, SMLoc E,
SMLoc AlignmentLoc = SMLoc()) {
ARMOperand *Op = new ARMOperand(k_Memory);
Op->Memory.BaseRegNum = BaseRegNum;
Op->Memory.OffsetImm = OffsetImm;
@@ -2534,6 +2644,7 @@ public:
Op->Memory.isNegative = isNegative;
Op->StartLoc = S;
Op->EndLoc = E;
Op->AlignmentLoc = AlignmentLoc;
return Op;
}
@@ -4346,6 +4457,7 @@ parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
if (Parser.getTok().is(AsmToken::Colon)) {
Parser.Lex(); // Eat the ':'.
E = Parser.getTok().getLoc();
SMLoc AlignmentLoc = Tok.getLoc();
const MCExpr *Expr;
if (getParser().parseExpression(Expr))
@@ -4380,7 +4492,7 @@ parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// the is*() predicates.
Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
ARM_AM::no_shift, 0, Align,
false, S, E));
false, S, E, AlignmentLoc));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand.
@@ -7968,6 +8080,42 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
}
case Match_AlignedMemoryRequiresNone:
case Match_DupAlignedMemoryRequiresNone:
case Match_AlignedMemoryRequires16:
case Match_DupAlignedMemoryRequires16:
case Match_AlignedMemoryRequires32:
case Match_DupAlignedMemoryRequires32:
case Match_AlignedMemoryRequires64:
case Match_DupAlignedMemoryRequires64:
case Match_AlignedMemoryRequires64or128:
case Match_DupAlignedMemoryRequires64or128:
case Match_AlignedMemoryRequires64or128or256:
{
SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getAlignmentLoc();
if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
switch (MatchResult) {
default:
llvm_unreachable("Missing Match_Aligned type");
case Match_AlignedMemoryRequiresNone:
case Match_DupAlignedMemoryRequiresNone:
return Error(ErrorLoc, "alignment must be omitted");
case Match_AlignedMemoryRequires16:
case Match_DupAlignedMemoryRequires16:
return Error(ErrorLoc, "alignment must be 16 or omitted");
case Match_AlignedMemoryRequires32:
case Match_DupAlignedMemoryRequires32:
return Error(ErrorLoc, "alignment must be 32 or omitted");
case Match_AlignedMemoryRequires64:
case Match_DupAlignedMemoryRequires64:
return Error(ErrorLoc, "alignment must be 64 or omitted");
case Match_AlignedMemoryRequires64or128:
case Match_DupAlignedMemoryRequires64or128:
return Error(ErrorLoc, "alignment must be 64, 128 or omitted");
case Match_AlignedMemoryRequires64or128or256:
return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted");
}
}
}
llvm_unreachable("Implement any new match types added!");

File diff suppressed because it is too large.