Allow pinsrw/pinsrb/pextrb/pextrw/movmskps/movmskpd/pmovmskb/extractps instructions to parse either GR32 or GR64 without resorting to duplicating instructions.

llvm-svn: 192567
Craig Topper 2013-10-14 04:55:01 +00:00
parent 4432208884
commit a422b09ae3
8 changed files with 117 additions and 95 deletions
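In practice, the change teaches the assembler to accept a 64-bit GPR in these operand positions and render it as the corresponding 32-bit register, so both spellings produce the same encoding instead of requiring separate *rr64 instruction definitions. A minimal llvm-mc style sketch of the resulting behavior, using the pmovmskb case; the RUN line is an assumption, while the CHECK lines and encoding are taken from the updated tests further down:

// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s

// The 64-bit destination is parsed as GR32orGR64 and rendered as %ecx.
// CHECK: pmovmskb %xmm5, %ecx
// CHECK: encoding: [0x66,0x0f,0xd7,0xcd]
pmovmskb %xmm5, %rcx

// The 32-bit spelling matches the same instruction and the same encoding.
// CHECK: pmovmskb %xmm5, %ecx
// CHECK: encoding: [0x66,0x0f,0xd7,0xcd]
pmovmskb %xmm5, %ecx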


@@ -869,6 +869,12 @@ struct X86Operand : public MCParsedAsmOperand {
bool isReg() const { return Kind == Register; }
bool isGR32orGR64() const {
return Kind == Register &&
(X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
}
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible.
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
@@ -882,6 +888,37 @@ struct X86Operand : public MCParsedAsmOperand {
Inst.addOperand(MCOperand::CreateReg(getReg()));
}
static unsigned getGR32FromGR64(unsigned RegNo) {
switch (RegNo) {
default: llvm_unreachable("Unexpected register");
case X86::RAX: return X86::EAX;
case X86::RCX: return X86::ECX;
case X86::RDX: return X86::EDX;
case X86::RBX: return X86::EBX;
case X86::RBP: return X86::EBP;
case X86::RSP: return X86::ESP;
case X86::RSI: return X86::ESI;
case X86::RDI: return X86::EDI;
case X86::R8: return X86::R8D;
case X86::R9: return X86::R9D;
case X86::R10: return X86::R10D;
case X86::R11: return X86::R11D;
case X86::R12: return X86::R12D;
case X86::R13: return X86::R13D;
case X86::R14: return X86::R14D;
case X86::R15: return X86::R15D;
case X86::RIP: return X86::EIP;
}
}
void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
unsigned RegNo = getReg();
if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
RegNo = getGR32FromGR64(RegNo);
Inst.addOperand(MCOperand::CreateReg(RegNo));
}
void addImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
addExpr(Inst, getImm());


@@ -502,6 +502,14 @@ class ImmZExtAsmOperandClass : AsmOperandClass {
let RenderMethod = "addImmOperands";
}
def X86GR32orGR64AsmOperand : AsmOperandClass {
let Name = "GR32orGR64";
}
def GR32orGR64 : RegisterOperand<GR32> {
let ParserMatchClass = X86GR32orGR64AsmOperand;
}
// Sign-extended immediate classes. We don't need to define the full lattice
// here because there is no instruction with an ambiguity between ImmSExti64i32
// and ImmSExti32i8.


@@ -546,18 +546,18 @@ let Constraints = "$src1 = $dst" in {
// Extract / Insert
def MMX_PEXTRWirri: MMXIi8<0xC5, MRMSrcReg,
(outs GR32:$dst), (ins VR64:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (int_x86_mmx_pextr_w VR64:$src1,
(iPTR imm:$src2)))],
IIC_MMX_PEXTR>, Sched<[WriteShuffle]>;
(outs GR32orGR64:$dst), (ins VR64:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (int_x86_mmx_pextr_w VR64:$src1,
(iPTR imm:$src2)))],
IIC_MMX_PEXTR>, Sched<[WriteShuffle]>;
let Constraints = "$src1 = $dst" in {
def MMX_PINSRWirri : MMXIi8<0xC4, MRMSrcReg,
(outs VR64:$dst),
(ins VR64:$src1, GR32:$src2, i32i8imm:$src3),
(ins VR64:$src1, GR32orGR64:$src2, i32i8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR64:$dst, (int_x86_mmx_pinsr_w VR64:$src1,
GR32:$src2, (iPTR imm:$src3)))],
GR32orGR64:$src2, (iPTR imm:$src3)))],
IIC_MMX_PINSRW>, Sched<[WriteShuffle]>;
def MMX_PINSRWirmi : MMXIi8<0xC4, MRMSrcMem,
@@ -571,9 +571,10 @@ let Constraints = "$src1 = $dst" in {
}
// Mask creation
def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src),
def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR64:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
[(set GR32:$dst,
[(set GR32orGR64:$dst,
(int_x86_mmx_pmovmskb VR64:$src))]>;


@@ -2706,14 +2706,10 @@ let Predicates = [UseSSE2] in {
/// sse12_extr_sign_mask - sse 1 & 2 unpack and interleave
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
Domain d> {
def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set GR32:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
Sched<[WriteVecLogic]>;
let isAsmParserOnly = 1, hasSideEffects = 0 in
def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"), [],
IIC_SSE_MOVMSK, d>, REX_W, Sched<[WriteVecLogic]>;
def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
Sched<[WriteVecLogic]>;
}
let Predicates = [HasAVX] in {
@@ -2730,15 +2726,15 @@ let Predicates = [HasAVX] in {
OpSize, VEX, VEX_L;
def : Pat<(i32 (X86fgetsign FR32:$src)),
(VMOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>;
(VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
(SUBREG_TO_REG (i64 0),
(VMOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
(VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
(VMOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>;
(VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
(SUBREG_TO_REG (i64 0),
(VMOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
(VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
}
defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
@@ -2747,18 +2743,18 @@ defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
SSEPackedDouble>, TB, OpSize;
def : Pat<(i32 (X86fgetsign FR32:$src)),
(MOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>,
(MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
Requires<[UseSSE1]>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
(SUBREG_TO_REG (i64 0),
(MOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
(MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
Requires<[UseSSE1]>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
(MOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>,
(MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
Requires<[UseSSE2]>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
(SUBREG_TO_REG (i64 0),
(MOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
(MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
Requires<[UseSSE2]>;
//===---------------------------------------------------------------------===//
@@ -4248,13 +4244,13 @@ let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
def rri : Ii8<0xC4, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1,
GR32:$src2, i32i8imm:$src3),
GR32orGR64:$src2, i32i8imm:$src3),
!if(Is2Addr,
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
"vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))], IIC_SSE_PINSRW>,
Sched<[WriteShuffle]>;
(X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))],
IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
def rmi : Ii8<0xC4, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1,
i16mem:$src2, i32i8imm:$src3),
@@ -4270,36 +4266,24 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
// Extract
let Predicates = [HasAVX] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
(outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
(outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
"vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
imm:$src2))]>, TB, OpSize, VEX,
[(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
imm:$src2))]>, TB, OpSize, VEX,
Sched<[WriteShuffle]>;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
(outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
(outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
imm:$src2))], IIC_SSE_PEXTRW>,
[(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
imm:$src2))], IIC_SSE_PEXTRW>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
// Insert
let Predicates = [HasAVX] in {
defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
let isAsmParserOnly = 1, hasSideEffects = 0 in
def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
"vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, TB, OpSize, VEX_4V, Sched<[WriteShuffle]>;
}
let Predicates = [HasAVX] in
defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in {
defm PINSRW : sse2_pinsrw, TB, OpSize;
let isAsmParserOnly = 1, hasSideEffects = 0 in
def PINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[]>, TB, OpSize, Sched<[WriteShuffle]>;
} // Predicates = [UseSSE2], Constraints = "$src1 = $dst"
let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
defm PINSRW : sse2_pinsrw, TB, OpSize;
} // ExeDomain = SSEPackedInt
@@ -4309,27 +4293,24 @@ let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in {
let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
[(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
IIC_SSE_MOVMSK>, VEX;
def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK>, VEX;
let Predicates = [HasAVX2] in {
def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR256:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX, VEX_L;
def VPMOVMSKBYr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
"pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
[(set GR32orGR64:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>,
VEX, VEX_L;
}
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
[(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
IIC_SSE_MOVMSK>;
def PMOVMSKBr64r : PDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK>;
} // ExeDomain = SSEPackedInt
@@ -6024,29 +6005,26 @@ let Predicates = [UseSSE41] in {
/// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
(ins VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
imm:$src2))]>,
OpSize;
let neverHasSideEffects = 1, mayStore = 1 in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}
let Predicates = [HasAVX] in {
let Predicates = [HasAVX] in
defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
(ins VR128:$src1, i32i8imm:$src2),
"vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
}
defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
@@ -6054,7 +6032,7 @@ defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
let isCodeGenOnly = 1, hasSideEffects = 0 in
def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
(ins VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -6123,11 +6101,11 @@ defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
OpndItins itins = DEFAULT_ITINS> {
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
(ins VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32:$dst,
[(set GR32orGR64:$dst,
(extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))],
itins.rr>,
OpSize;
@@ -6140,13 +6118,8 @@ multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
}
let ExeDomain = SSEPackedSingle in {
let Predicates = [UseAVX] in {
let Predicates = [UseAVX] in
defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
(ins VR128:$src1, i32i8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, OpSize, VEX;
}
defm EXTRACTPS : SS41I_extractf32<0x17, "extractps", SSE_EXTRACT_ITINS>;
}
@@ -6168,13 +6141,13 @@ def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
(ins VR128:$src1, GR32orGR64:$src2, i32i8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[(set VR128:$dst,
(X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
(X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>, OpSize;
def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
!if(Is2Addr,


@@ -549,8 +549,8 @@ cvttpd2dq 0xdeadbeef(%ebx,%ecx,8),%xmm5
// rdar://8490728 - llvm-mc rejects 'movmskpd'
movmskpd %xmm6, %rax
// CHECK: movmskpd %xmm6, %rax
// CHECK: encoding: [0x66,0x48,0x0f,0x50,0xc6]
// CHECK: movmskpd %xmm6, %eax
// CHECK: encoding: [0x66,0x0f,0x50,0xc6]
movmskpd %xmm6, %eax
// CHECK: movmskpd %xmm6, %eax
// CHECK: encoding: [0x66,0x0f,0x50,0xc6]


@@ -4044,7 +4044,7 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
// CHECK: encoding: [0xc4,0x43,0x79,0x17,0xc0,0x0a]
vextractps $10, %xmm8, %r8
// CHECK: vextractps $7, %xmm4, %rcx
// CHECK: vextractps $7, %xmm4, %ecx
// CHECK: encoding: [0xc4,0xe3,0x79,0x17,0xe1,0x07]
vextractps $7, %xmm4, %rcx
@@ -4052,35 +4052,35 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
// CHECK: encoding: [0xc4,0xe1,0xf9,0x7e,0xe1]
vmovd %xmm4, %rcx
// CHECK: vmovmskpd %xmm4, %rcx
// CHECK: vmovmskpd %xmm4, %ecx
// CHECK: encoding: [0xc5,0xf9,0x50,0xcc]
vmovmskpd %xmm4, %rcx
// CHECK: vmovmskpd %ymm4, %rcx
// CHECK: vmovmskpd %ymm4, %ecx
// CHECK: encoding: [0xc5,0xfd,0x50,0xcc]
vmovmskpd %ymm4, %rcx
// CHECK: vmovmskps %xmm4, %rcx
// CHECK: vmovmskps %xmm4, %ecx
// CHECK: encoding: [0xc5,0xf8,0x50,0xcc]
vmovmskps %xmm4, %rcx
// CHECK: vmovmskps %ymm4, %rcx
// CHECK: vmovmskps %ymm4, %ecx
// CHECK: encoding: [0xc5,0xfc,0x50,0xcc]
vmovmskps %ymm4, %rcx
// CHECK: vpextrb $7, %xmm4, %rcx
// CHECK: vpextrb $7, %xmm4, %ecx
// CHECK: encoding: [0xc4,0xe3,0x79,0x14,0xe1,0x07]
vpextrb $7, %xmm4, %rcx
// CHECK: vpinsrw $7, %r8, %xmm15, %xmm8
// CHECK: vpinsrw $7, %r8d, %xmm15, %xmm8
// CHECK: encoding: [0xc4,0x41,0x01,0xc4,0xc0,0x07]
vpinsrw $7, %r8, %xmm15, %xmm8
// CHECK: vpinsrw $7, %rcx, %xmm4, %xmm6
// CHECK: vpinsrw $7, %ecx, %xmm4, %xmm6
// CHECK: encoding: [0xc5,0xd9,0xc4,0xf1,0x07]
vpinsrw $7, %rcx, %xmm4, %xmm6
// CHECK: vpmovmskb %xmm4, %rcx
// CHECK: vpmovmskb %xmm4, %ecx
// CHECK: encoding: [0xc5,0xf9,0xd7,0xcc]
vpmovmskb %xmm4, %rcx


@@ -232,7 +232,7 @@ sha256msg2 (%rax), %xmm2
// CHECK: encoding: [0xdf,0x07]
filds (%rdi)
// CHECK: pmovmskb %xmm5, %rcx
// CHECK: pmovmskb %xmm5, %ecx
// CHECK: encoding: [0x66,0x0f,0xd7,0xcd]
pmovmskb %xmm5,%rcx
@@ -240,6 +240,6 @@ sha256msg2 (%rax), %xmm2
// CHECK: encoding: [0x66,0x0f,0xc4,0xe9,0x03]
pinsrw $3, %ecx, %xmm5
// CHECK: pinsrw $3, %rcx, %xmm5
// CHECK: pinsrw $3, %ecx, %xmm5
// CHECK: encoding: [0x66,0x0f,0xc4,0xe9,0x03]
pinsrw $3, %rcx, %xmm5


@@ -1213,6 +1213,7 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
TYPE("i32i8imm", TYPE_IMM32)
TYPE("u32u8imm", TYPE_IMM32)
TYPE("GR32", TYPE_Rv)
TYPE("GR32orGR64", TYPE_R32)
TYPE("i64mem", TYPE_Mv)
TYPE("i64i32imm", TYPE_IMM64)
TYPE("i64i8imm", TYPE_IMM64)
@@ -1323,6 +1324,7 @@ OperandEncoding RecognizableInstr::rmRegisterEncodingFromString
bool hasOpSizePrefix) {
ENCODING("GR16", ENCODING_RM)
ENCODING("GR32", ENCODING_RM)
ENCODING("GR32orGR64", ENCODING_RM)
ENCODING("GR64", ENCODING_RM)
ENCODING("GR8", ENCODING_RM)
ENCODING("VR128", ENCODING_RM)
@@ -1346,6 +1348,7 @@ OperandEncoding RecognizableInstr::roRegisterEncodingFromString
bool hasOpSizePrefix) {
ENCODING("GR16", ENCODING_REG)
ENCODING("GR32", ENCODING_REG)
ENCODING("GR32orGR64", ENCODING_REG)
ENCODING("GR64", ENCODING_REG)
ENCODING("GR8", ENCODING_REG)
ENCODING("VR128", ENCODING_REG)