TableGen/ARM64: print aliases even if they have syntax variants.

To get at least one use of the change (and some actual tests) in with its
commit, I've enabled the AArch64 & ARM64 NEON mov aliases.

llvm-svn: 208867
Tim Northover 2014-05-15 11:16:32 +00:00
parent dd8fca5136
commit d8d65a69cf
19 changed files with 154 additions and 131 deletions
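
What the change enables: an InstAlias assembly string may carry "{generic|alternate}" groups holding one spelling per AsmWriter syntax variant (ARM64's generic vs. Apple-style syntax, X86's AT&T vs. Intel dialects). Until now the AsmWriter emitter could not print such aliases at all; with this commit the alias string is flattened down to the writer's own variant before any other processing. Below is a minimal standalone sketch of the flattening idea — an illustrative reimplementation, not the actual CodeGenInstruction::FlattenAsmStringVariants source used by the commit:

#include <cassert>
#include <iostream>
#include <string>

// Keep only alternative `Variant` inside each "{a|b|...}" group; text outside
// the braces is common to every variant. Illustrative sketch only.
static std::string flattenVariants(const std::string &S, unsigned Variant) {
  std::string Out;
  size_t I = 0;
  while (I < S.size()) {
    if (S[I] != '{') {
      Out += S[I++];
      continue;
    }
    size_t End = S.find('}', I);
    assert(End != std::string::npos && "unterminated variant group");
    unsigned Alt = 0;
    for (size_t J = I + 1; J < End; ++J) {
      if (S[J] == '|')
        ++Alt;                 // move on to the next alternative
      else if (Alt == Variant)
        Out += S[J];           // part of the chosen alternative
    }
    I = End + 1;
  }
  return Out;
}

int main() {
  // ARM64 NEON register-mov alias from this commit (variant 1 = Apple syntax).
  std::string Mov = "mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}";
  std::cout << flattenVariants(Mov, 0) << '\n'; // mov     $dst.8b, $src.8b
  std::cout << flattenVariants(Mov, 1) << '\n'; // mov.8b  $dst, $src

  // The same notation covers X86's AT&T (0) vs. Intel (1) operand order.
  std::string Test = "test{b}\t{$val, $mem|$mem, $val}";
  std::cout << flattenVariants(Test, 0) << '\n'; // testb   $val, $mem
  std::cout << flattenVariants(Test, 1) << '\n'; // test    $mem, $val
  return 0;
}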


@@ -446,12 +446,10 @@ defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
 
 // Vector Move - register
 // Alias for ORR if Vn=Vm.
-// FIXME: This is actually the preferred syntax but TableGen can't deal with
-// custom printing of aliases.
 def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
-                    (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn), 0>;
+                    (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn)>;
 def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
-                    (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn), 0>;
+                    (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn)>;
 
 // The MOVI instruction takes two immediate operands. The first is the
 // immediate encoding, while the second is the cmode. A cmode of 14, or


@@ -4956,7 +4956,7 @@ def MCR2 : MovRCopro2<"mcr2", 0 /* from ARM core register to coprocessor */,
                       [(int_arm_mcr2 imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
                                      imm:$CRm, imm:$opc2)]>,
                       Requires<[PreV8]>;
-def : ARMInstAlias<"mcr2$ $cop, $opc1, $Rt, $CRn, $CRm",
+def : ARMInstAlias<"mcr2 $cop, $opc1, $Rt, $CRn, $CRm",
                    (MCR2 p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
                          c_imm:$CRm, 0)>;
 def MRC2 : MovRCopro2<"mrc2", 1 /* from coprocessor to ARM core register */,
@@ -4964,7 +4964,7 @@ def MRC2 : MovRCopro2<"mrc2", 1 /* from coprocessor to ARM core register */,
                       (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
                            imm0_7:$opc2), []>,
                       Requires<[PreV8]>;
-def : ARMInstAlias<"mrc2$ $cop, $opc1, $Rt, $CRn, $CRm",
+def : ARMInstAlias<"mrc2 $cop, $opc1, $Rt, $CRn, $CRm",
                    (MRC2 GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
                          c_imm:$CRm, 0)>;
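
The dropped `$` in these two alias strings is not just cosmetic: once alias strings are fed through the operand mangler added later in this commit, a `$` followed by a space scans as an empty operand name, which has no entry in the alias's operand map. A tiny hypothetical illustration of that failure mode (not LLVM code):

#include <cctype>
#include <iostream>
#include <string>

// After a '$', an operand name is a run of [A-Za-z0-9_]. In "mcr2$ $cop..."
// the first '$' is followed by a space, so the scanned name comes out empty --
// nothing the operand map could resolve. Illustrative only.
int main() {
  std::string S = "mcr2$ $cop, $opc1, $Rt, $CRn, $CRm";
  size_t Dollar = S.find('$');
  size_t End = Dollar + 1;
  while (End < S.size() &&
         (std::isalnum((unsigned char)S[End]) || S[End] == '_'))
    ++End;
  std::cout << "first operand name: \""
            << S.substr(Dollar + 1, End - Dollar - 1) << "\"\n"; // ""
  return 0;
}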


@@ -2518,25 +2518,22 @@ def : Pat<(ARM64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
 def : Pat<(ARM64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
           (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
 
-// FIXME: the .16b and .8b variantes should be emitted by the
-// AsmWriter. TableGen's AsmWriter-generator doesn't deal with variant syntaxes
-// in aliases yet though.
 def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
-def : InstAlias<"{mov\t$dst.8h, $src.8h|mov.8h\t$dst, $src}",
+def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
-def : InstAlias<"{mov\t$dst.4s, $src.4s|mov.4s\t$dst, $src}",
+def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
-def : InstAlias<"{mov\t$dst.2d, $src.2d|mov.2d\t$dst, $src}",
+def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
-def : InstAlias<"{mov\t$dst.8b, $src.8b|mov.8b\t$dst, $src}",
+def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
-def : InstAlias<"{mov\t$dst.4h, $src.4h|mov.4h\t$dst, $src}",
+def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
-def : InstAlias<"{mov\t$dst.2s, $src.2s|mov.2s\t$dst, $src}",
+def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
-def : InstAlias<"{mov\t$dst.1d, $src.1d|mov.1d\t$dst, $src}",
+def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
 
 def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #


@@ -2772,19 +2772,29 @@ defm : ShiftRotateByOneAlias<"ror", "ROR">;
 FIXME */
 
 // test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
-def : InstAlias<"test{b}\t{$val, $mem|$mem, $val}", (TEST8rm GR8 :$val, i8mem :$mem)>;
-def : InstAlias<"test{w}\t{$val, $mem|$mem, $val}", (TEST16rm GR16:$val, i16mem:$mem)>;
-def : InstAlias<"test{l}\t{$val, $mem|$mem, $val}", (TEST32rm GR32:$val, i32mem:$mem)>;
-def : InstAlias<"test{q}\t{$val, $mem|$mem, $val}", (TEST64rm GR64:$val, i64mem:$mem)>;
+def : InstAlias<"test{b}\t{$val, $mem|$mem, $val}",
+                (TEST8rm GR8 :$val, i8mem :$mem), 0>;
+def : InstAlias<"test{w}\t{$val, $mem|$mem, $val}",
+                (TEST16rm GR16:$val, i16mem:$mem), 0>;
+def : InstAlias<"test{l}\t{$val, $mem|$mem, $val}",
+                (TEST32rm GR32:$val, i32mem:$mem), 0>;
+def : InstAlias<"test{q}\t{$val, $mem|$mem, $val}",
+                (TEST64rm GR64:$val, i64mem:$mem), 0>;
 
 // xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
-def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}", (XCHG8rm GR8 :$val, i8mem :$mem)>;
-def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}", (XCHG16rm GR16:$val, i16mem:$mem)>;
-def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}", (XCHG32rm GR32:$val, i32mem:$mem)>;
-def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}", (XCHG64rm GR64:$val, i64mem:$mem)>;
+def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
+                (XCHG8rm GR8 :$val, i8mem :$mem), 0>;
+def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
+                (XCHG16rm GR16:$val, i16mem:$mem), 0>;
+def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
+                (XCHG32rm GR32:$val, i32mem:$mem), 0>;
+def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
+                (XCHG64rm GR64:$val, i64mem:$mem), 0>;
 
 // xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
-def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src)>;
-def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src)>, Requires<[Not64BitMode]>;
-def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar64 GR32_NOAX:$src)>, Requires<[In64BitMode]>;
-def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src)>;
+def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
+def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}",
+                (XCHG32ar GR32:$src), 0>, Requires<[Not64BitMode]>;
+def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}",
+                (XCHG32ar64 GR32_NOAX:$src), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;


@@ -2005,7 +2005,7 @@ def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
 
 // XMM only
 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
-                (VCVTPD2DQrr VR128:$dst, VR128:$src)>;
+                (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
 def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
@@ -2024,7 +2024,7 @@ def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         (int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
                    VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
 def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
-                (VCVTPD2DQYrr VR128:$dst, VR256:$src)>;
+                (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
 }
 
 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
@@ -2127,7 +2127,7 @@ def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
 
 // XMM only
 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
-                (VCVTTPD2DQrr VR128:$dst, VR128:$src)>;
+                (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                          "cvttpd2dqx\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
@@ -2146,7 +2146,7 @@ def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                          (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
                          IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
 def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
-                (VCVTTPD2DQYrr VR128:$dst, VR256:$src)>;
+                (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
 
 let Predicates = [HasAVX] in {
   def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
@@ -2252,7 +2252,7 @@ def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
 
 // XMM only
 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
-                (VCVTPD2PSrr VR128:$dst, VR128:$src)>;
+                (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2psx\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
@@ -2271,7 +2271,7 @@ def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         (int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
                         IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
 def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
-                (VCVTPD2PSYrr VR128:$dst, VR256:$src)>;
+                (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
 
 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtpd2ps\t{$src, $dst|$dst, $src}",
@@ -7873,18 +7873,20 @@ def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
 
 multiclass pclmul_alias<string asm, int immop> {
   def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
-                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;
+                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop), 0>;
 
   def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
-                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;
+                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop), 0>;
 
   def : InstAlias<!strconcat("vpclmul", asm,
                   "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
-                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;
+                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop),
+                  0>;
 
   def : InstAlias<!strconcat("vpclmul", asm,
                   "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
-                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
+                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop),
+                  0>;
 }
 
 defm : pclmul_alias<"hqhq", 0x11>;
 defm : pclmul_alias<"hqlq", 0x01>;


@@ -100,7 +100,7 @@ define void @check_stack_args() {
 ; CHECK-ARM64-DAG: str {{q[0-9]+}}, [sp]
 ; CHECK-ARM64-DAG: fmov d[[FINAL_DOUBLE:[0-9]+]], #1.0
-; CHECK-ARM64: orr v0.16b, v[[FINAL_DOUBLE]].16b, v[[FINAL_DOUBLE]].16b
+; CHECK-ARM64: mov v0.16b, v[[FINAL_DOUBLE]].16b
 
 ; CHECK-ARM64-NONEON-DAG: str {{q[0-9]+}}, [sp]
 ; CHECK-ARM64-NONEON-DAG: fmov d[[FINAL_DOUBLE:[0-9]+]], #1.0


@@ -3,8 +3,8 @@
 
 define <4 x i32> @copyTuple.QPair(i8* %a, i8* %b) {
 ; CHECK-LABEL: copyTuple.QPair:
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: ld2 { {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
 entry:
   %vld = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8* %a, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>, i32 0, i32 4)
@@ -16,9 +16,9 @@ entry:
 define <4 x i32> @copyTuple.QTriple(i8* %a, i8* %b, <4 x i32> %c) {
 ; CHECK-LABEL: copyTuple.QTriple:
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: ld3 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
 entry:
   %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3lane.v4i32(i8* %a, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i32 0, i32 4)
@@ -30,10 +30,10 @@ entry:
 define <4 x i32> @copyTuple.QQuad(i8* %a, i8* %b, <4 x i32> %c) {
 ; CHECK-LABEL: copyTuple.QQuad:
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: ld4 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
 entry:
   %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4lane.v4i32(i8* %a, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, <4 x i32> %c, i32 0, i32 4)


@@ -246,7 +246,7 @@ entry:
 define <2 x i64> @test_vuzp2q_s64(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test_vuzp2q_s64:
 ; CHECK-AARCH64: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
-; CHECK-AARCH64-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-AARCH64-NEXT: mov {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
 ; CHECK-ARM64: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
 entry:
   %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
@@ -305,7 +305,7 @@ entry:
 define <2 x i64> @test_vuzp2q_u64(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test_vuzp2q_u64:
 ; CHECK-AARCH64: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
-; CHECK-AARCH64-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-AARCH64-NEXT: mov {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
 ; CHECK-ARM64: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
 entry:
   %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
@@ -332,7 +332,7 @@ entry:
 define <2 x double> @test_vuzp2q_f64(<2 x double> %a, <2 x double> %b) {
 ; CHECK-LABEL: test_vuzp2q_f64:
 ; CHECK-AARCH64: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
-; CHECK-AARCH64-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-AARCH64-NEXT: mov {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
 ; CHECK-ARM64: zip2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
 entry:
   %shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 3>


@@ -3,8 +3,8 @@
 
 define <4 x i32> @copyTuple.QPair(i32* %a, i32* %b) {
 ; CHECK-LABEL: copyTuple.QPair:
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: ld2 { {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
 entry:
   %vld = tail call { <4 x i32>, <4 x i32> } @llvm.arm64.neon.ld2lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>, i64 1, i32* %a)
@@ -16,9 +16,9 @@ entry:
 define <4 x i32> @copyTuple.QTriple(i32* %a, i32* %b, <4 x i32> %c) {
 ; CHECK-LABEL: copyTuple.QTriple:
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: ld3 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
 entry:
   %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm64.neon.ld3lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %a)
@@ -30,10 +30,10 @@ entry:
 define <4 x i32> @copyTuple.QQuad(i32* %a, i32* %b, <4 x i32> %c) {
 ; CHECK-LABEL: copyTuple.QQuad:
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; CHECK: orr v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: ld4 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
 entry:
   %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm64.neon.ld4lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %a)


@@ -9,8 +9,8 @@
 
 define void @test_D1D2_from_D0D1(i8* %addr) #0 {
 ; CHECK-LABEL: test_D1D2_from_D0D1:
-; CHECK: orr.8b v2, v1
-; CHECK: orr.8b v1, v0
+; CHECK: mov.8b v2, v1
+; CHECK: mov.8b v1, v0
 entry:
   %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
   %vec = tail call { <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -26,8 +26,8 @@ entry:
 define void @test_D0D1_from_D1D2(i8* %addr) #0 {
 ; CHECK-LABEL: test_D0D1_from_D1D2:
-; CHECK: orr.8b v0, v1
-; CHECK: orr.8b v1, v2
+; CHECK: mov.8b v0, v1
+; CHECK: mov.8b v1, v2
 entry:
   %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
   %vec = tail call { <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -43,8 +43,8 @@ entry:
 define void @test_D0D1_from_D31D0(i8* %addr) #0 {
 ; CHECK-LABEL: test_D0D1_from_D31D0:
-; CHECK: orr.8b v1, v0
-; CHECK: orr.8b v0, v31
+; CHECK: mov.8b v1, v0
+; CHECK: mov.8b v0, v31
 entry:
   %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
   %vec = tail call { <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -60,8 +60,8 @@ entry:
 define void @test_D31D0_from_D0D1(i8* %addr) #0 {
 ; CHECK-LABEL: test_D31D0_from_D0D1:
-; CHECK: orr.8b v31, v0
-; CHECK: orr.8b v0, v1
+; CHECK: mov.8b v31, v0
+; CHECK: mov.8b v0, v1
 entry:
   %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
   %vec = tail call { <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -77,9 +77,9 @@ entry:
 define void @test_D2D3D4_from_D0D1D2(i8* %addr) #0 {
 ; CHECK-LABEL: test_D2D3D4_from_D0D1D2:
-; CHECK: orr.8b v4, v2
-; CHECK: orr.8b v3, v1
-; CHECK: orr.8b v2, v0
+; CHECK: mov.8b v4, v2
+; CHECK: mov.8b v3, v1
+; CHECK: mov.8b v2, v0
 entry:
   %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
   %vec = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm64.neon.ld3.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
@@ -97,9 +97,9 @@ entry:
 define void @test_Q0Q1Q2_from_Q1Q2Q3(i8* %addr) #0 {
 ; CHECK-LABEL: test_Q0Q1Q2_from_Q1Q2Q3:
-; CHECK: orr.16b v0, v1
-; CHECK: orr.16b v1, v2
-; CHECK: orr.16b v2, v3
+; CHECK: mov.16b v0, v1
+; CHECK: mov.16b v1, v2
+; CHECK: mov.16b v2, v3
 entry:
   %addr_v16i8 = bitcast i8* %addr to <16 x i8>*
   %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm64.neon.ld3.v16i8.p0v16i8(<16 x i8>* %addr_v16i8)
@@ -116,10 +116,10 @@ entry:
 define void @test_Q1Q2Q3Q4_from_Q30Q31Q0Q1(i8* %addr) #0 {
 ; CHECK-LABEL: test_Q1Q2Q3Q4_from_Q30Q31Q0Q1:
-; CHECK: orr.16b v4, v1
-; CHECK: orr.16b v3, v0
-; CHECK: orr.16b v2, v31
-; CHECK: orr.16b v1, v30
+; CHECK: mov.16b v4, v1
+; CHECK: mov.16b v3, v0
+; CHECK: mov.16b v2, v31
+; CHECK: mov.16b v1, v30
   %addr_v16i8 = bitcast i8* %addr to <16 x i8>*
   %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm64.neon.ld4.v16i8.p0v16i8(<16 x i8>* %addr_v16i8)
   %vec0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 0


@@ -205,7 +205,7 @@ define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
 ; CHECK: tst w0, #0x1
 ; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
 ; CHECK-NEXT: BB#
-; CHECK-NEXT: orr v[[VAL:[0-9]+]].16b, v0.16b, v0.16b
+; CHECK-NEXT: mov v[[VAL:[0-9]+]].16b, v0.16b
 ; CHECK-NEXT: [[IFFALSE]]:
 ; CHECK: str q[[VAL]], [{{x[0-9]+}}, :lo12:lhs]
   ret void
@@ -264,7 +264,7 @@ define fp128 @test_neg(fp128 %in) {
 ; Could in principle be optimized to fneg which we can't select, this makes
 ; sure that doesn't happen.
   %ret = fsub fp128 0xL00000000000000008000000000000000, %in
-; CHECK: orr v1.16b, v0.16b, v0.16b
+; CHECK: mov v1.16b, v0.16b
 ; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:[[MINUS0]]]
 ; CHECK: bl __subtf3


@@ -197,14 +197,13 @@
 // Vector Move - register
 //----------------------------------------------------------------------
-// FIXME: these should all print with the "mov" syntax.
 mov v0.8b, v31.8b
 mov v15.16b, v16.16b
 orr v0.8b, v31.8b, v31.8b
 orr v15.16b, v16.16b, v16.16b
 
-// CHECK: orr v0.8b, v31.8b, v31.8b      // encoding: [0xe0,0x1f,0xbf,0x0e]
-// CHECK: orr v15.16b, v16.16b, v16.16b  // encoding: [0x0f,0x1e,0xb0,0x4e]
-// CHECK: orr v0.8b, v31.8b, v31.8b      // encoding: [0xe0,0x1f,0xbf,0x0e]
-// CHECK: orr v15.16b, v16.16b, v16.16b  // encoding: [0x0f,0x1e,0xb0,0x4e]
+// CHECK: mov v0.8b, v31.8b              // encoding: [0xe0,0x1f,0xbf,0x0e]
+// CHECK: mov v15.16b, v16.16b           // encoding: [0x0f,0x1e,0xb0,0x4e]
+// CHECK: mov v0.8b, v31.8b              // encoding: [0xe0,0x1f,0xbf,0x0e]
+// CHECK: mov v15.16b, v16.16b           // encoding: [0x0f,0x1e,0xb0,0x4e]


@@ -445,14 +445,14 @@ foo:
   bsl.8b v0, v0, v0
   eor.8b v0, v0, v0
   orn.8b v0, v0, v0
-  orr.8b v0, v0, v0
+  orr.8b v0, v0, v1
 
 ; CHECK: bif.8b v0, v0, v0 ; encoding: [0x00,0x1c,0xe0,0x2e]
 ; CHECK: bit.8b v0, v0, v0 ; encoding: [0x00,0x1c,0xa0,0x2e]
 ; CHECK: bsl.8b v0, v0, v0 ; encoding: [0x00,0x1c,0x60,0x2e]
 ; CHECK: eor.8b v0, v0, v0 ; encoding: [0x00,0x1c,0x20,0x2e]
 ; CHECK: orn.8b v0, v0, v0 ; encoding: [0x00,0x1c,0xe0,0x0e]
-; CHECK: orr.8b v0, v0, v0 ; encoding: [0x00,0x1c,0xa0,0x0e]
+; CHECK: orr.8b v0, v0, v1 ; encoding: [0x00,0x1c,0xa1,0x0e]
 
   sadalp.4h v0, v0
   sadalp.8h v0, v0
@@ -1843,8 +1843,8 @@ foo:
   mov.16b v0, v0
   mov.2s v0, v0
 
-; CHECK: orr.16b v0, v0, v0 ; encoding: [0x00,0x1c,0xa0,0x4e]
-; CHECK: orr.8b v0, v0, v0  ; encoding: [0x00,0x1c,0xa0,0x0e]
+; CHECK: mov.16b v0, v0     ; encoding: [0x00,0x1c,0xa0,0x4e]
+; CHECK: mov.8b v0, v0      ; encoding: [0x00,0x1c,0xa0,0x0e]
 
 ; uadalp/sadalp verbose mode aliases.


@@ -133,10 +133,8 @@
 # Vector Move - register
 #------------------------------------------------------------------------------
-# FIXME: these should print as "mov", but TableGen can't handle it.
-# CHECK: orr v1.16b, v15.16b, v15.16b
-# CHECK: orr v25.8b, v4.8b, v4.8b
+# CHECK: mov v1.16b, v15.16b
+# CHECK: mov v25.8b, v4.8b
 0xe1 0x1d 0xaf 0x4e
 0x99 0x1c 0xa4 0x0e


@@ -320,14 +320,14 @@
 0x00 0x1c 0x60 0x2e
 0x00 0x1c 0x20 0x2e
 0x00 0x1c 0xe0 0x0e
-0x00 0x1c 0xa0 0x0e
+0x00 0x1c 0xa1 0x0e
 
 # CHECK: bif.8b v0, v0, v0
 # CHECK: bit.8b v0, v0, v0
 # CHECK: bsl.8b v0, v0, v0
 # CHECK: eor.8b v0, v0, v0
 # CHECK: orn.8b v0, v0, v0
-# CHECK: orr.8b v0, v0, v0
+# CHECK: orr.8b v0, v0, v1
 
 0x00 0x68 0x20 0x0e
 0x00 0x68 0x20 0x4e


@@ -1375,7 +1375,8 @@ void AsmMatcherInfo::buildInfo() {
   std::vector<Record*> AllInstAliases =
     Records.getAllDerivedDefinitions("InstAlias");
   for (unsigned i = 0, e = AllInstAliases.size(); i != e; ++i) {
-    CodeGenInstAlias *Alias = new CodeGenInstAlias(AllInstAliases[i], Target);
+    CodeGenInstAlias *Alias =
+        new CodeGenInstAlias(AllInstAliases[i], AsmVariantNo, Target);
 
     // If the tblgen -match-prefix option is specified (for tblgen hackers),
     // filter the set of instruction aliases we consider, based on the target


@@ -651,6 +651,25 @@ public:
   int getOpIndex(StringRef Op) { return OpMap[Op].first; }
   std::pair<int, int> &getOpData(StringRef Op) { return OpMap[Op]; }
 
+  std::pair<StringRef, StringRef::iterator> parseName(StringRef::iterator Start,
+                                                      StringRef::iterator End) {
+    StringRef::iterator I = Start;
+    if (*I == '{') {
+      // ${some_name}
+      Start = ++I;
+      while (I != End && *I != '}')
+        ++I;
+    } else {
+      // $name, just eat the usual suspects.
+      while (I != End &&
+             ((*I >= 'a' && *I <= 'z') || (*I >= 'A' && *I <= 'Z') ||
+              (*I >= '0' && *I <= '9') || *I == '_'))
+        ++I;
+    }
+
+    return std::make_pair(StringRef(Start, I - Start), I);
+  }
+
   void print(raw_ostream &O) {
     if (Conds.empty() && ReqFeatures.empty()) {
       O.indent(6) << "return true;\n";
@@ -675,21 +694,14 @@ public:
     // Directly mangle mapped operands into the string. Each operand is
    // identified by a '$' sign followed by a byte identifying the number of the
     // operand. We add one to the index to avoid zero bytes.
-    std::pair<StringRef, StringRef> ASM = StringRef(AsmString).split(' ');
-    SmallString<128> OutString = ASM.first;
-    if (!ASM.second.empty()) {
+    StringRef ASM(AsmString);
+    SmallString<128> OutString;
     raw_svector_ostream OS(OutString);
-    OS << ' ';
-    for (StringRef::iterator I = ASM.second.begin(), E = ASM.second.end();
-         I != E;) {
+    for (StringRef::iterator I = ASM.begin(), E = ASM.end(); I != E;) {
       OS << *I;
       if (*I == '$') {
-        StringRef::iterator Start = ++I;
-        while (I != E &&
-               ((*I >= 'a' && *I <= 'z') || (*I >= 'A' && *I <= 'Z') ||
-                (*I >= '0' && *I <= '9') || *I == '_'))
-          ++I;
-        StringRef Name(Start, I - Start);
+        StringRef Name;
+        std::tie(Name, I) = parseName(++I, E);
         assert(isOpMapped(Name) && "Unmapped operand!");
         int OpIndex, PrintIndex;
@@ -705,7 +717,7 @@
           ++I;
       }
     }
-    }
+
     OS.flush();
     // Emit the string.
     O.indent(6) << "AsmString = \"" << OutString.str() << "\";\n";
@@ -781,9 +793,10 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
 
   // Create a map from the qualified name to a list of potential matches.
   std::map<std::string, std::vector<CodeGenInstAlias*> > AliasMap;
+  unsigned Variant = AsmWriter->getValueAsInt("Variant");
   for (std::vector<Record*>::iterator
          I = AllInstAliases.begin(), E = AllInstAliases.end(); I != E; ++I) {
-    CodeGenInstAlias *Alias = new CodeGenInstAlias(*I, Target);
+    CodeGenInstAlias *Alias = new CodeGenInstAlias(*I, Variant, Target);
     const Record *R = *I;
     if (!R->getValueAsBit("EmitAlias"))
       continue; // We were told not to emit the alias, but to emit the aliasee.
@@ -976,7 +989,8 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
 
   // Code that prints the alias, replacing the operands with the ones from the
   // MCInst.
   O << "    unsigned I = 0;\n";
-  O << "    while (AsmString[I] != ' ' && AsmString[I] != '\\0')\n";
+  O << "    while (AsmString[I] != ' ' && AsmString[I] != '\\t' &&\n";
+  O << "           AsmString[I] != '\\0')\n";
   O << "      ++I;\n";
   O << "    OS << '\\t' << StringRef(AsmString, I);\n";
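
The new parseName accepts both `$name` and `${name}`; the braced form lets an operand reference sit flush against trailing text instead of being cut off at the first non-identifier character. A standalone sketch of the same scanning rule with a small demo — illustrative only, the real method works on StringRef iterators as shown above:

#include <iostream>
#include <string>
#include <utility>

// Given the text immediately after a '$', return the operand name and the
// position where scanning should resume. Mirrors the parseName logic above
// on plain std::string for demonstration purposes.
static std::pair<std::string, size_t> parseName(const std::string &S,
                                                size_t I) {
  size_t Start = I;
  if (S[I] == '{') {
    // ${some_name}: everything up to the closing brace.
    Start = ++I;
    while (I < S.size() && S[I] != '}')
      ++I;
    return {S.substr(Start, I - Start), I + 1}; // resume past '}'
  }
  // $name: eat the usual suspects.
  while (I < S.size() &&
         ((S[I] >= 'a' && S[I] <= 'z') || (S[I] >= 'A' && S[I] <= 'Z') ||
          (S[I] >= '0' && S[I] <= '9') || S[I] == '_'))
    ++I;
  return {S.substr(Start, I - Start), I};
}

int main() {
  // "$dst.8b" stops at the '.', leaving ".8b" as literal text.
  std::cout << parseName("dst.8b, $src.8b", 0).first << '\n'; // dst
  // "${dst}8b" is delimited by the braces, so text may follow immediately.
  std::cout << parseName("{dst}8b", 0).first << '\n';         // dst
  return 0;
}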


@@ -536,9 +536,13 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
   return false;
 }
 
-CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
-  AsmString = R->getValueAsString("AsmString");
+CodeGenInstAlias::CodeGenInstAlias(Record *R, unsigned Variant,
+                                   CodeGenTarget &T)
+    : TheDef(R) {
   Result = R->getValueAsDag("ResultInst");
+  AsmString = R->getValueAsString("AsmString");
+  AsmString = CodeGenInstruction::FlattenAsmStringVariants(AsmString, Variant);
+
   // Verify that the root of the result is an instruction.
   DefInit *DI = dyn_cast<DefInit>(Result->getOperator());
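
Since the flattening now happens in the CodeGenInstAlias constructor, every consumer — the matcher (constructed with AsmVariantNo) and the writer (constructed with Variant) — sees a single-variant string. The printing side then only needs to split the mnemonic at the first space or tab; the tab case is exactly what the AsmWriterEmitter hunk above adds, since Apple-syntax strings such as "mov.8b\tv2, v1" contain no space before the operands. A compressed, hypothetical stand-in for the generated printer's final step:

#include <iostream>
#include <string>

// Hypothetical stand-in for the tail of a generated printAliasInstr: by this
// point the alias string is variant-flattened and its operands substituted,
// so printing is just "tab, mnemonic, separator, operands". The mnemonic ends
// at the first space, tab, or end of string.
static void printFlattenedAlias(const std::string &AsmString,
                                std::ostream &OS) {
  std::string::size_type I = 0;
  while (I < AsmString.size() && AsmString[I] != ' ' && AsmString[I] != '\t')
    ++I;
  OS << '\t' << AsmString.substr(0, I)  // mnemonic, e.g. "mov" or "mov.8b"
     << AsmString.substr(I) << '\n';    // separator plus operands
}

int main() {
  printFlattenedAlias("mov v0.8b, v31.8b", std::cout); // generic syntax
  printFlattenedAlias("mov.8b\tv2, v1", std::cout);    // Apple syntax
  return 0;
}

Without the '\t' check, the second call would treat the entire Apple-style string as the mnemonic — the bug the extra condition in the emitted while loop avoids.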


@@ -336,7 +336,7 @@ namespace llvm {
     /// of them are matched by the operand, the second value should be -1.
     std::vector<std::pair<unsigned, int> > ResultInstOperandIndex;
 
-    CodeGenInstAlias(Record *R, CodeGenTarget &T);
+    CodeGenInstAlias(Record *R, unsigned Variant, CodeGenTarget &T);
 
     bool tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
                          Record *InstOpRec, bool hasSubOps, ArrayRef<SMLoc> Loc,