[AArch64] Removed unnecessary copy patterns with v1fx types.

- Copy patterns with float/double types are enough.
- Fix typos in test case names that were using v1fx.
- There is no ACLE intrinsic that uses the v1f32 type, and there is no conflict
  between overlapping NEON and non-NEON operations with this type, so there is
  no need to support operations on it.
- Remove v1f32 from the FPR32 register class and disallow v1f32 as a legal type
  for operations (see the IR sketch below).
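
A minimal LLVM IR sketch of what the last point means in practice (a
hypothetical example, not taken from this patch's tests): with v1f32 no longer
a legal type, standard type legalization scalarizes a <1 x float> value to a
plain f32, which the existing float copy patterns already handle.

; Hypothetical function, written only for illustration: the <1 x float>
; argument is scalarized to f32 during type legalization, so no v1f32-specific
; patterns are required.
define float @illustrate_v1f32(<1 x float> %v) {
  %e = extractelement <1 x float> %v, i32 0
  ret float %e
}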

Patch by Ana Pazos!

llvm-svn: 197159
Chad Rosier 2013-12-12 15:46:29 +00:00
parent 74f444cde5
commit 4055f42d22
5 changed files with 9 additions and 33 deletions

@@ -60,7 +60,7 @@ def CC_A64_APCS : CallingConv<[
 // registers. This makes sense because the PCS does not distinguish Short
 // Vectors and Floating-point types.
 CCIfType<[v1i16, v2i8], CCBitConvertToType<f16>>,
-CCIfType<[v1i32, v4i8, v2i16, v1f32], CCBitConvertToType<f32>>,
+CCIfType<[v1i32, v4i8, v2i16], CCBitConvertToType<f32>>,
 CCIfType<[v8i8, v4i16, v2i32, v2f32, v1i64, v1f64], CCBitConvertToType<f64>>,
 CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
 CCBitConvertToType<f128>>,
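
As a hedged illustration of the bit-convert rules above (an assumed example,
not one of this commit's tests): a short-vector value such as <2 x i16> matches
the CCIfType entry for f32, so it is expected to be passed in a 32-bit FP
register rather than a general-purpose register.

; Hypothetical function: per the rule above, the <2 x i16> argument and return
; value are bit-converted to f32 for the calling convention, so they should be
; passed in an S register (assumption based on the CCIfType entry shown above).
define <2 x i16> @pass_v2i16(<2 x i16> %v) {
  ret <2 x i16> %v
}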

@@ -64,7 +64,6 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
 addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
 addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
 addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
-addRegisterClass(MVT::v1f32, &AArch64::FPR32RegClass);
 addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
 addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
 addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
@@ -296,7 +295,6 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
-setOperationAction(ISD::BUILD_VECTOR, MVT::v1f32, Custom);
 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
 setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
@@ -333,7 +331,6 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
 setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
 setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
 setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
-setOperationAction(ISD::SETCC, MVT::v1f32, Custom);
 setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
 setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
 setOperationAction(ISD::SETCC, MVT::v1f64, Custom);

@@ -5995,12 +5995,6 @@ defm : NeonI_Scalar_DUP_Copy_pattern1<DUPhv_H,
 defm : NeonI_Scalar_DUP_Copy_pattern1<DUPbv_B,
 v1i8, v16i8, i32, neon_uimm4_bare,
 v8i8, v16i8, neon_uimm3_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPdv_D,
-v1f64, v2f64, f64, neon_uimm1_bare,
-v1f64, v2f64, neon_uimm0_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPsv_S,
-v1f32, v4f32, f32, neon_uimm2_bare,
-v2f32, v4f32, neon_uimm1_bare>;
 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPdv_D,
 v1i64, v2i64, i64, neon_uimm1_bare,
 v1i64, v2i64, neon_uimm0_bare>;
@@ -6013,12 +6007,6 @@ defm : NeonI_Scalar_DUP_Copy_pattern2<DUPhv_H,
 defm : NeonI_Scalar_DUP_Copy_pattern2<DUPbv_B,
 v1i8, v16i8, i32, neon_uimm4_bare,
 v8i8, v16i8, neon_uimm3_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPdv_D,
-v1f64, v2f64, f64, neon_uimm1_bare,
-v1f64, v2f64, neon_uimm0_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPsv_S,
-v1f32, v4f32, f32, neon_uimm2_bare,
-v2f32, v4f32, neon_uimm1_bare>;
 multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
 Instruction DUPI, Operand OpImm,
@@ -6123,7 +6111,6 @@ def : Pat<(v16i8 (bitconvert (v2f64 VPR128:$src))), (v16i8 VPR128:$src)>;
 def : Pat<(f16 (bitconvert (v1i16 FPR16:$src))), (f16 FPR16:$src)>;
 def : Pat<(f32 (bitconvert (v1i32 FPR32:$src))), (f32 FPR32:$src)>;
 def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
-def : Pat<(f32 (bitconvert (v1f32 FPR32:$src))), (f32 FPR32:$src)>;
 def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
 def : Pat<(i64 (bitconvert (v1i64 FPR64:$src))), (FMOVxd $src)>;
@@ -6155,7 +6142,6 @@ def : Pat<(f128 (bitconvert (v2f64 VPR128:$src))), (f128 VPR128:$src)>;
 def : Pat<(v1i16 (bitconvert (f16 FPR16:$src))), (v1i16 FPR16:$src)>;
 def : Pat<(v1i32 (bitconvert (f32 FPR32:$src))), (v1i32 FPR32:$src)>;
 def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
-def : Pat<(v1f32 (bitconvert (f32 FPR32:$src))), (v1f32 FPR32:$src)>;
 def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
 def : Pat<(v1i64 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
@@ -6688,9 +6674,6 @@ def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
 def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
 (f64 FPR64:$Rn)>;
-def : Pat<(f32 (vector_extract (v1f32 FPR32:$Rn), (i64 0))),
-(f32 FPR32:$Rn)>;
 def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
 (v1i8 (EXTRACT_SUBREG (v16i8
 (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
@@ -6744,8 +6727,6 @@ def : Pat<(v2i32 (scalar_to_vector GPR32:$Rn)),
 (INSsw (v4i32 (IMPLICIT_DEF)), $Rn, (i64 0))),
 sub_64))>;
-def : Pat<(v1f32 (scalar_to_vector (f32 FPR32:$Rn))),
-(v1f32 FPR32:$Rn)>;
 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
 (v1f64 FPR64:$Rn)>;
@@ -7467,8 +7448,6 @@ defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
 (FMOVdd $src)>;
-def : Pat<(v1f32 (scalar_to_vector (f32 FPR32:$src))),
-(FMOVss $src)>;
 // Pattern for lane in 128-bit vector
 class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,

@@ -155,7 +155,7 @@ def FPR16 : RegisterClass<"AArch64", [f16, v1i16], 16,
 (sequence "H%u", 0, 31)> {
 }
-def FPR32 : RegisterClass<"AArch64", [f32, v1i32, v1f32], 32,
+def FPR32 : RegisterClass<"AArch64", [f32, v1i32], 32,
 (sequence "S%u", 0, 31)> {
 }

@@ -20,8 +20,8 @@ define <2 x i32> @test_v8i8_to_v2i32(<8 x i8> %in) nounwind {
 ret <2 x i32> %val
 }
-define <2 x float> @test_v8i8_to_v1f32(<8 x i8> %in) nounwind{
-; CHECK: test_v8i8_to_v1f32:
+define <2 x float> @test_v8i8_to_v2f32(<8 x i8> %in) nounwind{
+; CHECK: test_v8i8_to_v2f32:
 ; CHECK-NEXT: // BB#0:
 ; CHECK-NEXT: ret
@@ -67,8 +67,8 @@ define <2 x i32> @test_v4i16_to_v2i32(<4 x i16> %in) nounwind {
 ret <2 x i32> %val
 }
-define <2 x float> @test_v4i16_to_v1f32(<4 x i16> %in) nounwind{
-; CHECK: test_v4i16_to_v1f32:
+define <2 x float> @test_v4i16_to_v2f32(<4 x i16> %in) nounwind{
+; CHECK: test_v4i16_to_v2f32:
 ; CHECK-NEXT: // BB#0:
 ; CHECK-NEXT: ret
@@ -114,8 +114,8 @@ define <2 x i32> @test_v2i32_to_v2i32(<2 x i32> %in) nounwind {
 ret <2 x i32> %val
 }
-define <2 x float> @test_v2i32_to_v1f32(<2 x i32> %in) nounwind{
-; CHECK: test_v2i32_to_v1f32:
+define <2 x float> @test_v2i32_to_v2f32(<2 x i32> %in) nounwind{
+; CHECK: test_v2i32_to_v2f32:
 ; CHECK-NEXT: // BB#0:
 ; CHECK-NEXT: ret