Just rip v2f32 support completely out of the X86 backend. In
the example in the testcase, we now generate:

_test1:                                 ## @test1
	movss	4(%esp), %xmm0
	addss	8(%esp), %xmm0
	movl	12(%esp), %eax
	movss	%xmm0, (%eax)
	ret

instead of:

_test1:                                                     ## @test1
	subl	$20, %esp
	movl	24(%esp), %eax
	movq	%mm0, (%esp)
	movq	%mm0, 8(%esp)
	movss	(%esp), %xmm0
	addss	12(%esp), %xmm0
	movss	%xmm0, (%eax)
	addl	$20, %esp
	ret

v2f32 support did not work reliably because most of the X86
backend didn't know it was legal.  It was apparently only added
to support returning source-level v2f32 values in MMX registers
in x86-32 mode.  If ABI compatibility is important on this
GCC-extended-vector type for some reason, then the frontend
should generate IR that returns v2i32 instead of v2f32.  However,
we generally don't try very hard to be ABI compatible on GCC
extended vectors.
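
As a concrete sketch of that workaround (hypothetical IR, not part of this
commit; the function names are made up), a frontend could bitcast the value
and return the same 64 bits as v2i32 instead:

	; relies on the v2f32 return support being removed here
	define <2 x float> @ret_v2f32(<2 x float> %v) nounwind {
	  ret <2 x float> %v
	}

	; ABI-friendly alternative: return the same bits as v2i32
	define <2 x i32> @ret_v2i32(<2 x float> %v) nounwind {
	  %bc = bitcast <2 x float> %v to <2 x i32>
	  ret <2 x i32> %bc
	}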

llvm-svn: 107601
Chris Lattner, 2010-07-04 23:07:25 +00:00
commit 45cc4d74a3 (parent 681b926d54)
2 changed files with 17 additions and 28 deletions

lib/Target/X86/X86ISelLowering.cpp

@@ -618,11 +618,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     addRegisterClass(MVT::v4i16, X86::VR64RegisterClass, false);
     addRegisterClass(MVT::v2i32, X86::VR64RegisterClass, false);
-    // FIXME: v2f32 isn't an MMX type. We currently claim that it is legal
-    // because of some ABI issue, but this isn't the right fix.
-    bool IsV2F32Legal = !Subtarget->is64Bit();
-    if (IsV2F32Legal)
-      addRegisterClass(MVT::v2f32, X86::VR64RegisterClass, false);
     addRegisterClass(MVT::v1i64, X86::VR64RegisterClass, false);
     setOperationAction(ISD::ADD, MVT::v8i8, Legal);
@@ -668,17 +663,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
     setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
     AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
-    if (IsV2F32Legal) {
-      setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
-      AddPromotedToType (ISD::LOAD, MVT::v2f32, MVT::v1i64);
-    }
     setOperationAction(ISD::LOAD, MVT::v1i64, Legal);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
-    if (IsV2F32Legal)
-      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
@@ -686,8 +675,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
-    if (IsV2F32Legal)
-      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f32, Custom);
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
@@ -706,8 +693,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Custom);
     setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Custom);
     setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Custom);
-    if (IsV2F32Legal)
-      setOperationAction(ISD::BIT_CONVERT, MVT::v2f32, Custom);
     setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Custom);
   }
 }
@@ -4453,7 +4438,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
 }
 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
-/// ones, or rewriting v4i32 / v2f32 as 2 wide ones if possible. This can be
+/// ones, or rewriting v4i32 / v2i32 as 2 wide ones if possible. This can be
 /// done when every pair / quad of shuffle mask elements point to elements in
 /// the right sequence. e.g.
 /// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
@@ -5078,13 +5063,9 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
 SDValue
 X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
   DebugLoc dl = Op.getDebugLoc();
-  if (Op.getValueType() == MVT::v2f32)
-    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f32,
-                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i32,
-                                   DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
-                                               Op.getOperand(0))));
-  if (Op.getValueType() == MVT::v1i64 && Op.getOperand(0).getValueType() == MVT::i64)
+  if (Op.getValueType() == MVT::v1i64 &&
+      Op.getOperand(0).getValueType() == MVT::i64)
     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
   SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
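
(For reference: the deleted block above was routing a v2f32 scalar_to_vector
through the MMX integer types. In IR terms the dance was roughly the
following; this is an illustrative sketch, not code from this commit:

	%i = bitcast float %f to i32
	%v = insertelement <2 x i32> undef, i32 %i, i32 0
	%r = bitcast <2 x i32> %v to <2 x float>

With v2f32 no longer claimed legal, none of this is needed: the generic type
legalizer handles the illegal type, which is where the straight-line
movss/addss code in the example above comes from.)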

test/CodeGen/X86/v2f32.ll

@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 -asm-verbose=0 -o - | FileCheck %s
+; RUN: llc < %s -march=x86-64 -asm-verbose=0 -o - | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -march=x86 -asm-verbose=0 -o - | FileCheck %s -check-prefix=X32
 ; PR7518
 define void @test1(<2 x float> %Q, float *%P2) nounwind {
@@ -8,9 +9,16 @@ define void @test1(<2 x float> %Q, float *%P2) nounwind {
   store float %c, float* %P2
   ret void
-; CHECK: test1:
-; CHECK-NEXT: addss %xmm1, %xmm0
-; CHECK-NEXT: movss %xmm0, (%rdi)
-; CHECK-NEXT: ret
+; X64: test1:
+; X64-NEXT: addss %xmm1, %xmm0
+; X64-NEXT: movss %xmm0, (%rdi)
+; X64-NEXT: ret
+
+; X32: test1:
+; X32-NEXT: movss 4(%esp), %xmm0
+; X32-NEXT: addss 8(%esp), %xmm0
+; X32-NEXT: movl 12(%esp), %eax
+; X32-NEXT: movss %xmm0, (%eax)
+; X32-NEXT: ret
 }