ARM64: print correct aliases for NEON mov & mvn instructions

In all cases, if a "mov" alias exists, it is the canonical form of the
instruction. Now that TableGen can support aliases containing syntax variants,
we can enable them and improve the quality of the asm output.

llvm-svn: 208874
This commit is contained in:
Tim Northover 2014-05-15 12:11:02 +00:00
parent a357badc58
commit 2509a3fc64
12 changed files with 145 additions and 153 deletions

View File

@ -5595,13 +5595,11 @@ class SIMDUMov<bit Q, string size, ValueType vectype, RegisterClass regtype,
: BaseSIMDMov<Q, size, 0b0111, regtype, idxtype, "umov",
[(set regtype:$Rd, (vector_extract (vectype V128:$Rn), idxtype:$idx))]>;
// FIXME: these aliases should be canonical, but TableGen can't handle the
// alternate syntaxes.
class SIMDMovAlias<string asm, string size, Instruction inst,
RegisterClass regtype, Operand idxtype>
: InstAlias<asm#"{\t$dst, $src"#size#"$idx" #
"|" # size # "\t$dst, $src$idx}",
(inst regtype:$dst, V128:$src, idxtype:$idx), 0>;
(inst regtype:$dst, V128:$src, idxtype:$idx)>;
multiclass SMov {
def vi8to32 : SIMDSMov<0, ".b", GPR32, VectorIndexB> {
@ -5685,18 +5683,16 @@ class SIMDInsFromElement<string size, ValueType vectype,
(elttype (vector_extract (vectype V128:$Rn), idxtype:$idx2)),
idxtype:$idx))]>;
// FIXME: the MOVs should be canonical, but TableGen's alias printing can't cope
// with syntax variants.
class SIMDInsMainMovAlias<string size, Instruction inst,
RegisterClass regtype, Operand idxtype>
: InstAlias<"mov" # "{\t$dst" # size # "$idx, $src" #
"|" # size #"\t$dst$idx, $src}",
(inst V128:$dst, idxtype:$idx, regtype:$src), 0>;
(inst V128:$dst, idxtype:$idx, regtype:$src)>;
class SIMDInsElementMovAlias<string size, Instruction inst,
Operand idxtype>
: InstAlias<"mov" # "{\t$dst" # size # "$idx, $src" # size # "$idx2" #
"|" # size #"\t$dst$idx, $src$idx2}",
(inst V128:$dst, idxtype:$idx, V128:$src, idxtype:$idx2), 0>;
(inst V128:$dst, idxtype:$idx, V128:$src, idxtype:$idx2)>;
multiclass SIMDIns {

View File

@ -2315,10 +2315,10 @@ defm NEG : SIMDTwoVectorBHSD<1, 0b01011, "neg",
UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm NOT : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
// Aliases for MVN -> NOT.
def : InstAlias<"mvn.8b $Vd, $Vn", (NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn.16b $Vd, $Vn", (NOTv16i8 V128:$Vd, V128:$Vn)>;
def : InstAlias<"mvn $Vd.8b, $Vn.8b", (NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn $Vd.16b, $Vn.16b", (NOTv16i8 V128:$Vd, V128:$Vn)>;
def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
(NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
(NOTv16i8 V128:$Vd, V128:$Vn)>;
def : Pat<(ARM64neg (v8i8 V64:$Rn)), (NEGv8i8 V64:$Rn)>;
def : Pat<(ARM64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
@ -3733,8 +3733,6 @@ def : Pat<(v4f32 (ARM64dup (f32 fpimm0))), (MOVIv2d_ns (i32 0))>;
// EDIT per word & halfword: 2s, 4h, 4s, & 8h
defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
// FIXME: these should be canonical but the TableGen alias printer can't cope
// with syntax variants.
def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
@ -3775,8 +3773,6 @@ def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0b1110, V128, imm0_255,
// EDIT per word & halfword: 2s, 4h, 4s, & 8h
defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
// FIXME: these should be canonical, but TableGen can't do aliases & syntax
// variants together.
def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;

View File

@ -661,7 +661,7 @@ define <4 x i16> @vselect_v4i16(<4 x i16> %a) {
define <8 x i8> @vselect_cmp_ne(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
; CHECK-LABEL: vselect_cmp_ne:
; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%cmp = icmp ne <8 x i8> %a, %b
%d = select <8 x i1> %cmp, <8 x i8> %b, <8 x i8> %c
@ -680,7 +680,7 @@ define <8 x i8> @vselect_cmp_eq(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
define <8 x i8> @vselect_cmpz_ne(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
; CHECK-LABEL: vselect_cmpz_ne:
; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%cmp = icmp ne <8 x i8> %a, zeroinitializer
%d = select <8 x i1> %cmp, <8 x i8> %b, <8 x i8> %c

View File

@ -60,7 +60,7 @@ define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
; CHECK-LABEL: cmne8xi8:
; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
@ -69,7 +69,7 @@ define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
; CHECK-LABEL: cmne16xi8:
; CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
@ -78,7 +78,7 @@ define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
; CHECK-LABEL: cmne4xi16:
; CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
@ -87,7 +87,7 @@ define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
; CHECK-LABEL: cmne8xi16:
; CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
@ -96,7 +96,7 @@ define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: cmne2xi32:
; CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -105,7 +105,7 @@ define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: cmne4xi32:
; CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -114,7 +114,7 @@ define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
; CHECK-LABEL: cmne2xi64:
; CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -974,7 +974,7 @@ define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
; CHECK-LABEL: cmneqz8xi8:
; CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{0x0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
@ -983,7 +983,7 @@ define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
; CHECK-LABEL: cmneqz16xi8:
; CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{0x0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
@ -992,7 +992,7 @@ define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
; CHECK-LABEL: cmneqz4xi16:
; CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #{{0x0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
@ -1001,7 +1001,7 @@ define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
; CHECK-LABEL: cmneqz8xi16:
; CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #{{0x0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
@ -1010,7 +1010,7 @@ define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
; CHECK-LABEL: cmneqz2xi32:
; CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0x0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1019,7 +1019,7 @@ define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
; CHECK-LABEL: cmneqz4xi32:
; CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0x0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1028,7 +1028,7 @@ define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
; CHECK-LABEL: cmneqz2xi64:
; CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0x0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -1525,7 +1525,7 @@ define <2 x i32> @fcmuno2xfloat(<2 x float> %A, <2 x float> %B) {
; CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
; CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp uno <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1538,7 +1538,7 @@ define <4 x i32> @fcmuno4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
; CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uno <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1551,7 +1551,7 @@ define <2 x i64> @fcmuno2xdouble(<2 x double> %A, <2 x double> %B) {
; CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
; CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uno <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -1564,7 +1564,7 @@ define <2 x i32> @fcmueq2xfloat(<2 x float> %A, <2 x float> %B) {
; CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
; CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ueq <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1577,7 +1577,7 @@ define <4 x i32> @fcmueq4xfloat(<4 x float> %A, <4 x float> %B) {
; CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
; CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ueq <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1590,7 +1590,7 @@ define <2 x i64> @fcmueq2xdouble(<2 x double> %A, <2 x double> %B) {
; CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
; CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ueq <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -1601,7 +1601,7 @@ define <2 x i32> @fcmuge2xfloat(<2 x float> %A, <2 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; UGE = ULE with swapped operands, ULE implemented as !OGT.
; CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp uge <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1612,7 +1612,7 @@ define <4 x i32> @fcmuge4xfloat(<4 x float> %A, <4 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; UGE = ULE with swapped operands, ULE implemented as !OGT.
; CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uge <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1623,7 +1623,7 @@ define <2 x i64> @fcmuge2xdouble(<2 x double> %A, <2 x double> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; UGE = ULE with swapped operands, ULE implemented as !OGT.
; CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uge <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -1634,7 +1634,7 @@ define <2 x i32> @fcmugt2xfloat(<2 x float> %A, <2 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; UGT = ULT with swapped operands, ULT implemented as !OGE.
; CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ugt <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1645,7 +1645,7 @@ define <4 x i32> @fcmugt4xfloat(<4 x float> %A, <4 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; UGT = ULT with swapped operands, ULT implemented as !OGE.
; CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ugt <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1653,7 +1653,7 @@ define <4 x i32> @fcmugt4xfloat(<4 x float> %A, <4 x float> %B) {
define <2 x i64> @fcmugt2xdouble(<2 x double> %A, <2 x double> %B) {
; CHECK-LABEL: fcmugt2xdouble:
; CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ugt <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -1664,7 +1664,7 @@ define <2 x i32> @fcmule2xfloat(<2 x float> %A, <2 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; ULE implemented as !OGT.
; CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ule <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1675,7 +1675,7 @@ define <4 x i32> @fcmule4xfloat(<4 x float> %A, <4 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; ULE implemented as !OGT.
; CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1685,7 +1685,7 @@ define <2 x i64> @fcmule2xdouble(<2 x double> %A, <2 x double> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; ULE implemented as !OGT.
; CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -1696,7 +1696,7 @@ define <2 x i32> @fcmult2xfloat(<2 x float> %A, <2 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; ULT implemented as !OGE.
; CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ult <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1707,7 +1707,7 @@ define <4 x i32> @fcmult4xfloat(<4 x float> %A, <4 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; ULT implemented as !OGE.
; CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1717,7 +1717,7 @@ define <2 x i64> @fcmult2xdouble(<2 x double> %A, <2 x double> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; ULT implemented as !OGE.
; CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -1728,7 +1728,7 @@ define <2 x i32> @fcmune2xfloat(<2 x float> %A, <2 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; UNE = !OEQ.
; CHECK: fcmeq {{v[0-9]+}}.2s, v0.2s, v1.2s
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp une <2 x float> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1739,7 +1739,7 @@ define <4 x i32> @fcmune4xfloat(<4 x float> %A, <4 x float> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; UNE = !OEQ.
; CHECK: fcmeq {{v[0-9]+}}.4s, v0.4s, v1.4s
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <4 x float> %A, %B
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1749,7 +1749,7 @@ define <2 x i64> @fcmune2xdouble(<2 x double> %A, <2 x double> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; UNE = !OEQ.
; CHECK: fcmeq {{v[0-9]+}}.2d, v0.2d, v1.2d
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <2 x double> %A, %B
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -1943,7 +1943,7 @@ define <2 x i32> @fcmueqz2xfloat(<2 x float> %A) {
; CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
; CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ueq <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1955,7 +1955,7 @@ define <4 x i32> @fcmueqz4xfloat(<4 x float> %A) {
; CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
; CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ueq <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1967,7 +1967,7 @@ define <2 x i64> @fcmueqz2xdouble(<2 x double> %A) {
; CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
; CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ueq <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -1977,7 +1977,7 @@ define <2 x i32> @fcmugez2xfloat(<2 x float> %A) {
; CHECK-LABEL: fcmugez2xfloat:
; UGE with zero = !OLT
; CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp uge <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -1987,7 +1987,7 @@ define <4 x i32> @fcmugez4xfloat(<4 x float> %A) {
; CHECK-LABEL: fcmugez4xfloat:
; UGE with zero = !OLT
; CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uge <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -1996,7 +1996,7 @@ define <2 x i64> @fcmugez2xdouble(<2 x double> %A) {
; CHECK-LABEL: fcmugez2xdouble:
; UGE with zero = !OLT
; CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uge <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -2006,7 +2006,7 @@ define <2 x i32> @fcmugtz2xfloat(<2 x float> %A) {
; CHECK-LABEL: fcmugtz2xfloat:
; UGT with zero = !OLE
; CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ugt <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -2016,7 +2016,7 @@ define <4 x i32> @fcmugtz4xfloat(<4 x float> %A) {
; CHECK-LABEL: fcmugtz4xfloat:
; UGT with zero = !OLE
; CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ugt <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -2025,7 +2025,7 @@ define <2 x i64> @fcmugtz2xdouble(<2 x double> %A) {
; CHECK-LABEL: fcmugtz2xdouble:
; UGT with zero = !OLE
; CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ugt <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -2035,7 +2035,7 @@ define <2 x i32> @fcmultz2xfloat(<2 x float> %A) {
; CHECK-LABEL: fcmultz2xfloat:
; ULT with zero = !OGE
; CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ult <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -2044,7 +2044,7 @@ define <2 x i32> @fcmultz2xfloat(<2 x float> %A) {
define <4 x i32> @fcmultz4xfloat(<4 x float> %A) {
; CHECK-LABEL: fcmultz4xfloat:
; CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -2053,7 +2053,7 @@ define <4 x i32> @fcmultz4xfloat(<4 x float> %A) {
define <2 x i64> @fcmultz2xdouble(<2 x double> %A) {
; CHECK-LABEL: fcmultz2xdouble:
; CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -2064,7 +2064,7 @@ define <2 x i32> @fcmulez2xfloat(<2 x float> %A) {
; CHECK-LABEL: fcmulez2xfloat:
; ULE with zero = !OGT
; CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ule <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -2074,7 +2074,7 @@ define <4 x i32> @fcmulez4xfloat(<4 x float> %A) {
; CHECK-LABEL: fcmulez4xfloat:
; ULE with zero = !OGT
; CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -2084,7 +2084,7 @@ define <2 x i64> @fcmulez2xdouble(<2 x double> %A) {
; CHECK-LABEL: fcmulez2xdouble:
; ULE with zero = !OGT
; CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -2094,7 +2094,7 @@ define <2 x i32> @fcmunez2xfloat(<2 x float> %A) {
; CHECK-LABEL: fcmunez2xfloat:
; UNE with zero = !OEQ with zero
; CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp une <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -2104,7 +2104,7 @@ define <4 x i32> @fcmunez4xfloat(<4 x float> %A) {
; CHECK-LABEL: fcmunez4xfloat:
; UNE with zero = !OEQ with zero
; CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -2113,7 +2113,7 @@ define <2 x i64> @fcmunez2xdouble(<2 x double> %A) {
; CHECK-LABEL: fcmunez2xdouble:
; UNE with zero = !OEQ with zero
; CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -2126,7 +2126,7 @@ define <2 x i32> @fcmunoz2xfloat(<2 x float> %A) {
; CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
; CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #{{0.0|0}}
; CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp uno <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -2138,7 +2138,7 @@ define <4 x i32> @fcmunoz4xfloat(<4 x float> %A) {
; CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
; CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #{{0.0|0}}
; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uno <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -2150,7 +2150,7 @@ define <2 x i64> @fcmunoz2xdouble(<2 x double> %A) {
; CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
; CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #{{0.0|0}}
; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
; CHECK-NEXT: {{mvn|not}} {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uno <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4

View File

@ -260,21 +260,21 @@ define i32 @umovw8h(<8 x i16> %tmp1) {
define i32 @umovw4s(<4 x i32> %tmp1) {
; CHECK-LABEL: umovw4s:
; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.s[2]
; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.s[2]
%tmp3 = extractelement <4 x i32> %tmp1, i32 2
ret i32 %tmp3
}
define i64 @umovx2d(<2 x i64> %tmp1) {
; CHECK-LABEL: umovx2d:
; CHECK: umov {{x[0-9]+}}, {{v[0-9]+}}.d[1]
; CHECK: mov {{x[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp3 = extractelement <2 x i64> %tmp1, i32 1
ret i64 %tmp3
}
define i32 @umovw8b(<8 x i8> %tmp1) {
; CHECK-LABEL: umovw8b:
; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.b[7]
%tmp3 = extractelement <8 x i8> %tmp1, i32 7
%tmp4 = zext i8 %tmp3 to i32
ret i32 %tmp4
@ -282,7 +282,7 @@ define i32 @umovw8b(<8 x i8> %tmp1) {
define i32 @umovw4h(<4 x i16> %tmp1) {
; CHECK-LABEL: umovw4h:
; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.h[2]
%tmp3 = extractelement <4 x i16> %tmp1, i32 2
%tmp4 = zext i16 %tmp3 to i32
ret i32 %tmp4
@ -290,7 +290,7 @@ define i32 @umovw4h(<4 x i16> %tmp1) {
define i32 @umovw2s(<2 x i32> %tmp1) {
; CHECK-LABEL: umovw2s:
; CHECK: umov {{w[0-9]+}}, {{v[0-9]+}}.s[1]
; CHECK: mov {{w[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp3 = extractelement <2 x i32> %tmp1, i32 1
ret i32 %tmp3
}

View File

@ -51,7 +51,7 @@ define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
@ -59,7 +59,7 @@ define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, %B;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
@ -67,7 +67,7 @@ define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
@ -75,7 +75,7 @@ define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, %B;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
@ -83,7 +83,7 @@ define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -91,7 +91,7 @@ define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, %B;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -99,7 +99,7 @@ define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, %B;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@ -803,7 +803,7 @@ define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
ret <8 x i8> %tmp4
@ -811,7 +811,7 @@ define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
ret <16 x i8> %tmp4
@ -819,7 +819,7 @@ define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
ret <4 x i16> %tmp4
@ -827,7 +827,7 @@ define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
@ -835,7 +835,7 @@ define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: mvn {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
ret <2 x i32> %tmp4
@ -843,7 +843,7 @@ define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@ -851,7 +851,7 @@ define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: mvn {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4

View File

@ -2,7 +2,7 @@
define zeroext i8 @f1(<16 x i8> %a) {
; CHECK-LABEL: f1:
; CHECK: umov.b w0, v0[3]
; CHECK: mov.b w0, v0[3]
; CHECK-NEXT: ret
%vecext = extractelement <16 x i8> %a, i32 3
ret i8 %vecext
@ -10,7 +10,7 @@ define zeroext i8 @f1(<16 x i8> %a) {
define zeroext i16 @f2(<4 x i16> %a) {
; CHECK-LABEL: f2:
; CHECK: umov.h w0, v0[2]
; CHECK: mov.h w0, v0[2]
; CHECK-NEXT: ret
%vecext = extractelement <4 x i16> %a, i32 2
ret i16 %vecext
@ -18,7 +18,7 @@ define zeroext i16 @f2(<4 x i16> %a) {
define i32 @f3(<2 x i32> %a) {
; CHECK-LABEL: f3:
; CHECK: umov.s w0, v0[1]
; CHECK: mov.s w0, v0[1]
; CHECK-NEXT: ret
%vecext = extractelement <2 x i32> %a, i32 1
ret i32 %vecext
@ -26,7 +26,7 @@ define i32 @f3(<2 x i32> %a) {
define i64 @f4(<2 x i64> %a) {
; CHECK-LABEL: f4:
; CHECK: umov.d x0, v0[1]
; CHECK: mov.d x0, v0[1]
; CHECK-NEXT: ret
%vecext = extractelement <2 x i64> %a, i32 1
ret i64 %vecext

View File

@ -229,7 +229,7 @@ define <1 x i64> @fcmlt_d(<1 x double> %A, <1 x double> %B) nounwind {
define <1 x i64> @cmnez_d(<1 x i64> %A) nounwind {
; CHECK-LABEL: cmnez_d:
; CHECK: cmeq d[[EQ:[0-9]+]], d0, #0
; CHECK: not.8b v0, v[[EQ]]
; CHECK: mvn.8b v0, v[[EQ]]
%tst = icmp ne <1 x i64> %A, zeroinitializer
%mask = sext <1 x i1> %tst to <1 x i64>
ret <1 x i64> %mask

View File

@ -17,15 +17,15 @@
mov v20.s[0], w30
mov v1.d[1], x7
// CHECK: ins v2.b[2], w1 // encoding: [0x22,0x1c,0x05,0x4e]
// CHECK: ins v7.h[7], w14 // encoding: [0xc7,0x1d,0x1e,0x4e]
// CHECK: ins v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
// CHECK: ins v1.d[1], x7 // encoding: [0xe1,0x1c,0x18,0x4e]
// CHECK: {{mov|ins}} v2.b[2], w1 // encoding: [0x22,0x1c,0x05,0x4e]
// CHECK: {{mov|ins}} v7.h[7], w14 // encoding: [0xc7,0x1d,0x1e,0x4e]
// CHECK: {{mov|ins}} v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
// CHECK: {{mov|ins}} v1.d[1], x7 // encoding: [0xe1,0x1c,0x18,0x4e]
// CHECK: ins v2.b[2], w1 // encoding: [0x22,0x1c,0x05,0x4e]
// CHECK: ins v7.h[7], w14 // encoding: [0xc7,0x1d,0x1e,0x4e]
// CHECK: ins v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
// CHECK: ins v1.d[1], x7 // encoding: [0xe1,0x1c,0x18,0x4e]
// CHECK: {{mov|ins}} v2.b[2], w1 // encoding: [0x22,0x1c,0x05,0x4e]
// CHECK: {{mov|ins}} v7.h[7], w14 // encoding: [0xc7,0x1d,0x1e,0x4e]
// CHECK: {{mov|ins}} v20.s[0], w30 // encoding: [0xd4,0x1f,0x04,0x4e]
// CHECK: {{mov|ins}} v1.d[1], x7 // encoding: [0xe1,0x1c,0x18,0x4e]
//------------------------------------------------------------------------------
@ -55,13 +55,13 @@
mov w20, v9.s[2]
mov x7, v18.d[1]
// CHECK: umov w1, v0.b[15] // encoding: [0x01,0x3c,0x1f,0x0e]
// CHECK: umov w14, v6.h[4] // encoding: [0xce,0x3c,0x12,0x0e]
// CHECK: umov w20, v9.s[2] // encoding: [0x34,0x3d,0x14,0x0e]
// CHECK: umov x7, v18.d[1] // encoding: [0x47,0x3e,0x18,0x4e]
// CHECK: {{mov|umov}} w1, v0.b[15] // encoding: [0x01,0x3c,0x1f,0x0e]
// CHECK: {{mov|umov}} w14, v6.h[4] // encoding: [0xce,0x3c,0x12,0x0e]
// CHECK: {{mov|umov}} w20, v9.s[2] // encoding: [0x34,0x3d,0x14,0x0e]
// CHECK: {{mov|umov}} x7, v18.d[1] // encoding: [0x47,0x3e,0x18,0x4e]
// CHECK: umov w20, v9.s[2] // encoding: [0x34,0x3d,0x14,0x0e]
// CHECK: umov x7, v18.d[1] // encoding: [0x47,0x3e,0x18,0x4e]
// CHECK: {{mov|umov}} w20, v9.s[2] // encoding: [0x34,0x3d,0x14,0x0e]
// CHECK: {{mov|umov}} x7, v18.d[1] // encoding: [0x47,0x3e,0x18,0x4e]
//------------------------------------------------------------------------------
// Insert element (vector, from element)
@ -77,15 +77,15 @@
mov v15.s[3], v22.s[2]
mov v0.d[0], v4.d[1]
// CHECK: ins v1.b[14], v3.b[6] // encoding: [0x61,0x34,0x1d,0x6e]
// CHECK: ins v6.h[7], v7.h[5] // encoding: [0xe6,0x54,0x1e,0x6e]
// CHECK: ins v15.s[3], v22.s[2] // encoding: [0xcf,0x46,0x1c,0x6e]
// CHECK: ins v0.d[0], v4.d[1] // encoding: [0x80,0x44,0x08,0x6e]
// CHECK: {{mov|ins}} v1.b[14], v3.b[6] // encoding: [0x61,0x34,0x1d,0x6e]
// CHECK: {{mov|ins}} v6.h[7], v7.h[5] // encoding: [0xe6,0x54,0x1e,0x6e]
// CHECK: {{mov|ins}} v15.s[3], v22.s[2] // encoding: [0xcf,0x46,0x1c,0x6e]
// CHECK: {{mov|ins}} v0.d[0], v4.d[1] // encoding: [0x80,0x44,0x08,0x6e]
// CHECK: ins v1.b[14], v3.b[6] // encoding: [0x61,0x34,0x1d,0x6e]
// CHECK: ins v6.h[7], v7.h[5] // encoding: [0xe6,0x54,0x1e,0x6e]
// CHECK: ins v15.s[3], v22.s[2] // encoding: [0xcf,0x46,0x1c,0x6e]
// CHECK: ins v0.d[0], v4.d[1] // encoding: [0x80,0x44,0x08,0x6e]
// CHECK: {{mov|ins}} v1.b[14], v3.b[6] // encoding: [0x61,0x34,0x1d,0x6e]
// CHECK: {{mov|ins}} v6.h[7], v7.h[5] // encoding: [0xe6,0x54,0x1e,0x6e]
// CHECK: {{mov|ins}} v15.s[3], v22.s[2] // encoding: [0xcf,0x46,0x1c,0x6e]
// CHECK: {{mov|ins}} v0.d[0], v4.d[1] // encoding: [0x80,0x44,0x08,0x6e]
//------------------------------------------------------------------------------
// Duplicate to all lanes( vector, from element)
@ -98,13 +98,13 @@
dup v17.4s, v20.s[0]
dup v5.2d, v1.d[1]
// CHECK: dup v1.8b, v2.b[2] // encoding: [0x41,0x04,0x05,0x0e]
// CHECK: dup v11.4h, v7.h[7] // encoding: [0xeb,0x04,0x1e,0x0e]
// CHECK: dup v17.2s, v20.s[0] // encoding: [0x91,0x06,0x04,0x0e]
// CHECK: dup v1.16b, v2.b[2] // encoding: [0x41,0x04,0x05,0x4e]
// CHECK: dup v11.8h, v7.h[7] // encoding: [0xeb,0x04,0x1e,0x4e]
// CHECK: dup v17.4s, v20.s[0] // encoding: [0x91,0x06,0x04,0x4e]
// CHECK: dup v5.2d, v1.d[1] // encoding: [0x25,0x04,0x18,0x4e]
// CHECK: {{mov|dup}} v1.8b, v2.b[2] // encoding: [0x41,0x04,0x05,0x0e]
// CHECK: {{mov|dup}} v11.4h, v7.h[7] // encoding: [0xeb,0x04,0x1e,0x0e]
// CHECK: {{mov|dup}} v17.2s, v20.s[0] // encoding: [0x91,0x06,0x04,0x0e]
// CHECK: {{mov|dup}} v1.16b, v2.b[2] // encoding: [0x41,0x04,0x05,0x4e]
// CHECK: {{mov|dup}} v11.8h, v7.h[7] // encoding: [0xeb,0x04,0x1e,0x4e]
// CHECK: {{mov|dup}} v17.4s, v20.s[0] // encoding: [0x91,0x06,0x04,0x4e]
// CHECK: {{mov|dup}} v5.2d, v1.d[1] // encoding: [0x25,0x04,0x18,0x4e]
//------------------------------------------------------------------------------
// Duplicate to all lanes( vector, from main)
@ -117,13 +117,13 @@
dup v17.4s, w28
dup v5.2d, x0
// CHECK: dup v1.8b, w1 // encoding: [0x21,0x0c,0x01,0x0e]
// CHECK: dup v11.4h, w14 // encoding: [0xcb,0x0d,0x02,0x0e]
// CHECK: dup v17.2s, w30 // encoding: [0xd1,0x0f,0x04,0x0e]
// CHECK: dup v1.16b, w2 // encoding: [0x41,0x0c,0x01,0x4e]
// CHECK: dup v11.8h, w16 // encoding: [0x0b,0x0e,0x02,0x4e]
// CHECK: dup v17.4s, w28 // encoding: [0x91,0x0f,0x04,0x4e]
// CHECK: dup v5.2d, x0 // encoding: [0x05,0x0c,0x08,0x4e]
// CHECK: {{mov|dup}} v1.8b, w1 // encoding: [0x21,0x0c,0x01,0x0e]
// CHECK: {{mov|dup}} v11.4h, w14 // encoding: [0xcb,0x0d,0x02,0x0e]
// CHECK: {{mov|dup}} v17.2s, w30 // encoding: [0xd1,0x0f,0x04,0x0e]
// CHECK: {{mov|dup}} v1.16b, w2 // encoding: [0x41,0x0c,0x01,0x4e]
// CHECK: {{mov|dup}} v11.8h, w16 // encoding: [0x0b,0x0e,0x02,0x4e]
// CHECK: {{mov|dup}} v17.4s, w28 // encoding: [0x91,0x0f,0x04,0x4e]
// CHECK: {{mov|dup}} v5.2d, x0 // encoding: [0x05,0x0c,0x08,0x4e]

View File

@ -282,8 +282,8 @@
not v0.16b, v31.16b
not v1.8b, v9.8b
// CHECK: not v0.16b, v31.16b // encoding: [0xe0,0x5b,0x20,0x6e]
// CHECK: not v1.8b, v9.8b // encoding: [0x21,0x59,0x20,0x2e]
// CHECK: {{mvn|not}} v0.16b, v31.16b // encoding: [0xe0,0x5b,0x20,0x6e]
// CHECK: {{mvn|not}} v1.8b, v9.8b // encoding: [0x21,0x59,0x20,0x2e]
//------------------------------------------------------------------------------
// Bitwise reverse

View File

@ -193,10 +193,10 @@ foo:
; CHECK: smov.s x3, v2[2] ; encoding: [0x43,0x2c,0x14,0x4e]
; CHECK: smov.s x3, v2[2] ; encoding: [0x43,0x2c,0x14,0x4e]
; CHECK: umov.s w3, v2[2] ; encoding: [0x43,0x3c,0x14,0x0e]
; CHECK: umov.s w3, v2[2] ; encoding: [0x43,0x3c,0x14,0x0e]
; CHECK: umov.d x3, v2[1] ; encoding: [0x43,0x3c,0x18,0x4e]
; CHECK: umov.d x3, v2[1] ; encoding: [0x43,0x3c,0x18,0x4e]
; CHECK: mov.s w3, v2[2] ; encoding: [0x43,0x3c,0x14,0x0e]
; CHECK: mov.s w3, v2[2] ; encoding: [0x43,0x3c,0x14,0x0e]
; CHECK: mov.d x3, v2[1] ; encoding: [0x43,0x3c,0x18,0x4e]
; CHECK: mov.d x3, v2[1] ; encoding: [0x43,0x3c,0x18,0x4e]
; MOV aliases for UMOV instructions above
@ -205,10 +205,10 @@ foo:
mov.d x11, v13[1]
mov x17, v19.d[0]
; CHECK: umov.s w2, v3[3] ; encoding: [0x62,0x3c,0x1c,0x0e]
; CHECK: umov.s w5, v7[2] ; encoding: [0xe5,0x3c,0x14,0x0e]
; CHECK: umov.d x11, v13[1] ; encoding: [0xab,0x3d,0x18,0x4e]
; CHECK: umov.d x17, v19[0] ; encoding: [0x71,0x3e,0x08,0x4e]
; CHECK: mov.s w2, v3[3] ; encoding: [0x62,0x3c,0x1c,0x0e]
; CHECK: mov.s w5, v7[2] ; encoding: [0xe5,0x3c,0x14,0x0e]
; CHECK: mov.d x11, v13[1] ; encoding: [0xab,0x3d,0x18,0x4e]
; CHECK: mov.d x17, v19[0] ; encoding: [0x71,0x3e,0x08,0x4e]
ins.d v2[1], x5
ins.s v2[1], w5
@ -534,7 +534,7 @@ foo:
; CHECK: frsqrte.2s v0, v0 ; encoding: [0x00,0xd8,0xa1,0x2e]
; CHECK: fsqrt.2s v0, v0 ; encoding: [0x00,0xf8,0xa1,0x2e]
; CHECK: neg.8b v0, v0 ; encoding: [0x00,0xb8,0x20,0x2e]
; CHECK: not.8b v0, v0 ; encoding: [0x00,0x58,0x20,0x2e]
; CHECK: mvn.8b v0, v0 ; encoding: [0x00,0x58,0x20,0x2e]
; CHECK: rbit.8b v0, v0 ; encoding: [0x00,0x58,0x60,0x2e]
; CHECK: rev16.8b v0, v0 ; encoding: [0x00,0x18,0x20,0x0e]
; CHECK: rev32.8b v0, v0 ; encoding: [0x00,0x08,0x20,0x2e]
@ -1881,10 +1881,10 @@ foo:
mvn.8b v10, v6
mvn.16b v11, v7
; CHECK: not.8b v1, v4 ; encoding: [0x81,0x58,0x20,0x2e]
; CHECK: not.16b v19, v17 ; encoding: [0x33,0x5a,0x20,0x6e]
; CHECK: not.8b v10, v6 ; encoding: [0xca,0x58,0x20,0x2e]
; CHECK: not.16b v11, v7 ; encoding: [0xeb,0x58,0x20,0x6e]
; CHECK: mvn.8b v1, v4 ; encoding: [0x81,0x58,0x20,0x2e]
; CHECK: mvn.16b v19, v17 ; encoding: [0x33,0x5a,0x20,0x6e]
; CHECK: mvn.8b v10, v6 ; encoding: [0xca,0x58,0x20,0x2e]
; CHECK: mvn.16b v11, v7 ; encoding: [0xeb,0x58,0x20,0x6e]
; sqdmull verbose mode aliases
sqdmull v10.4s, v12.4h, v12.4h

View File

@ -124,10 +124,10 @@
# CHECK: smov.s x3, v2[2]
# CHECK: smov.s x3, v2[2]
# CHECK: umov.s w3, v2[2]
# CHECK: umov.s w3, v2[2]
# CHECK: umov.d x3, v2[1]
# CHECK: umov.d x3, v2[1]
# CHECK: mov.s w3, v2[2]
# CHECK: mov.s w3, v2[2]
# CHECK: mov.d x3, v2[1]
# CHECK: mov.d x3, v2[1]
0xa2 0x1c 0x18 0x4e
0xa2 0x1c 0x0c 0x4e
@ -445,7 +445,7 @@
# CHECK: frsqrte.2s v0, v0
# CHECK: fsqrt.2s v0, v0
# CHECK: neg.8b v0, v0
# CHECK: not.8b v0, v0
# CHECK: mvn.8b v0, v0
# CHECK: rbit.8b v0, v0
# CHECK: rev16.8b v0, v0
# CHECK: rev32.8b v0, v0