2009-12-16 04:14:24 +08:00
|
|
|
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o %t %s
// RUN: FileCheck < %t %s

// CHECK: %0 = type { i64, double }
|
|
|
|
|
|
|
|
// char returns are sign-extended to the register width on x86-64.
// CHECK: define signext i8 @f0()
char f0(void) {
  return 0;
}
|
|
|
|
|
2010-04-22 03:10:54 +08:00
|
|
|
// short returns are likewise sign-extended.
// CHECK: define signext i16 @f1()
short f1(void) {
  return 0;
}
|
|
|
|
|
2010-04-22 03:10:54 +08:00
|
|
|
// int is returned directly in a 32-bit register, no extension attribute.
// CHECK: define i32 @f2()
int f2(void) {
  return 0;
}
|
|
|
|
|
2010-04-22 03:10:54 +08:00
|
|
|
// float is returned in an SSE register.
// CHECK: define float @f3()
float f3(void) {
  return 0;
}
|
|
|
|
|
2010-04-22 03:10:54 +08:00
|
|
|
// double is returned in an SSE register.
// CHECK: define double @f4()
double f4(void) {
  return 0;
}
|
|
|
|
|
2010-04-22 03:10:54 +08:00
|
|
|
// long double maps to the 80-bit x87 type on x86-64.
// CHECK: define x86_fp80 @f5()
long double f5(void) {
  return 0;
}
|
|
|
|
|
2010-04-22 03:10:54 +08:00
|
|
|
// Integer arguments narrower than 32 bits get the signext attribute.
// CHECK: define void @f6(i8 signext %a0, i16 signext %a1, i32 %a2, i64 %a3, i8* %a4)
void f6(char a0, short a1, int a2, long long a3, void *a4) {
}
|
2009-02-27 01:38:19 +08:00
|
|
|
|
2010-04-22 03:10:54 +08:00
|
|
|
// Enums are passed as their underlying integer type (i32 here).
// CHECK: define void @f7(i32 %a0)
typedef enum { A, B, C } e7;
void f7(e7 a0) {
}
|
2009-03-07 01:50:25 +08:00
|
|
|
|
|
|
|
// Test merging/passing of upper eightbyte with X87 class.
//
// CHECK: define %0 @f8_1()
// CHECK: define void @f8_2(i64 %a0.coerce0, double %a0.coerce1)
union u8 {
  long double a;
  int b;
};
// f8_1 never returns; the loop avoids needing a constructible return value.
union u8 f8_1() { while (1) {} }
void f8_2(union u8 a0) {}
|
2009-05-09 06:26:44 +08:00
|
|
|
|
2010-04-22 03:10:54 +08:00
|
|
|
// A two-int struct with a trailing zero-width bit-field is returned as one i64.
// CHECK: define i64 @f9()
struct s9 { int a; int b; int : 0; } f9(void) { while (1) {} }
|
2009-05-09 06:26:44 +08:00
|
|
|
|
2010-06-29 08:14:52 +08:00
|
|
|
// Same layout as s9, but passed as an argument: coerced to a single i64.
// CHECK: define void @f10(i64 %a0.coerce)
struct s10 { int a; int b; int : 0; };
void f10(struct s10 a0) {}
|
|
|
|
|
2010-04-22 03:49:55 +08:00
|
|
|
// A union containing an x87 long double is returned indirectly via sret.
// CHECK: define void @f11(%struct.s19* sret %agg.result)
union { long double a; float b; } f11() { while (1) {} }
|
2009-05-12 23:22:40 +08:00
|
|
|
|
implement a todo: pass a eight-byte that consists of a
small integer + padding as that small integer. On code
like:
struct c { double x; int y; };
void bar(struct c C) { }
This means that we compile to:
define void @bar(double %C.coerce0, i32 %C.coerce1) nounwind {
entry:
%C = alloca %struct.c, align 8 ; <%struct.c*> [#uses=2]
%0 = getelementptr %struct.c* %C, i32 0, i32 0 ; <double*> [#uses=1]
store double %C.coerce0, double* %0
%1 = getelementptr %struct.c* %C, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 %C.coerce1, i32* %1
instead of:
define void @bar(double %C.coerce0, i64 %C.coerce1) nounwind {
entry:
%C = alloca %struct.c, align 8 ; <%struct.c*> [#uses=3]
%0 = bitcast %struct.c* %C to %0* ; <%0*> [#uses=2]
%1 = getelementptr %0* %0, i32 0, i32 0 ; <double*> [#uses=1]
store double %C.coerce0, double* %1
%2 = getelementptr %0* %0, i32 0, i32 1 ; <i64*> [#uses=1]
store i64 %C.coerce1, i64* %2
which gives SRoA heartburn.
This implements rdar://5711709, a nice low number :)
llvm-svn: 109737
2010-07-29 15:30:00 +08:00
|
|
|
// Over-aligned single-int struct still travels as a plain i32.
// CHECK: define i32 @f12_0()
// CHECK: define void @f12_1(i32 %a0.coerce)
struct s12 { int a __attribute__((aligned(16))); };
struct s12 f12_0(void) { while (1) {} }
void f12_1(struct s12 a0) {}
|
|
|
|
|
2009-05-23 01:33:44 +08:00
|
|
|
// Check that sret parameter is accounted for when checking available integer
// registers.
// CHECK: define void @f13(%struct.s13_0* sret %agg.result, i32 %a, i32 %b, i32 %c, i32 %d, {{.*}}* byval %e, i32 %f)

struct s13_0 { long long f0[3]; };
struct s13_1 { long long f0[2]; };
struct s13_0 f13(int a, int b, int c, int d,
                 struct s13_1 e, int f) { while (1) {} }
|
2009-05-23 01:33:44 +08:00
|
|
|
|
2010-04-22 03:10:54 +08:00
|
|
|
// Seventh integer argument spills past the six GPRs but keeps its extension.
// CHECK: define void @f14({{.*}}, i8 signext %X)
void f14(int a, int b, int c, int d, int e, int f, char X) {}
|
|
|
|
|
|
|
|
// Pointer argument past the register budget is still typed i8*.
// CHECK: define void @f15({{.*}}, i8* %X)
void f15(int a, int b, int c, int d, int e, int f, void *X) {}
|
|
|
|
|
|
|
|
// Ninth float argument exhausts the eight SSE registers but stays float-typed.
// CHECK: define void @f16({{.*}}, float %X)
void f16(float a, float b, float c, float d, float e, float f, float g, float h,
         float X) {}
|
2010-04-22 03:10:54 +08:00
|
|
|
|
|
|
|
// long double arguments are passed in memory as x86_fp80.
// CHECK: define void @f17({{.*}}, x86_fp80 %X)
void f17(float a, float b, float c, float d, float e, float f, float g, float h,
         long double X) {}
|
|
|
|
|
X86-64:
pass/return structs of float/int as float/i32 instead of double/i64
to make the code generated for ABI cleaner. Passing in the low part
of a double is the same as passing in a float.
For example, we now compile:
struct DeclGroup { float NumDecls; };
float foo(DeclGroup D);
void bar(DeclGroup *D) {
foo(*D);
}
into:
%struct.DeclGroup = type { float }
define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
entry:
%D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
%agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
%tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
%tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
%tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
%coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <float*> [#uses=1]
%0 = load float* %coerce.dive, align 1 ; <float> [#uses=1]
%call = call float @_Z3foo9DeclGroup(float %0) ; <float> [#uses=0]
ret void
}
instead of:
%struct.DeclGroup = type { float }
define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
entry:
%D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
%agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
%tmp3 = alloca double ; <double*> [#uses=2]
store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
%tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
%tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
%tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
%coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <float*> [#uses=1]
%0 = bitcast double* %tmp3 to float* ; <float*> [#uses=1]
%1 = load float* %coerce.dive ; <float> [#uses=1]
store float %1, float* %0, align 1
%2 = load double* %tmp3 ; <double> [#uses=1]
%call = call float @_Z3foo9DeclGroup(double %2) ; <float> [#uses=0]
ret void
}
which is this machine code (at -O0):
__Z3barP9DeclGroup:
subq $24, %rsp
movq %rdi, 16(%rsp)
movq 16(%rsp), %rdi
leaq 8(%rsp), %rax
movl (%rdi), %ecx
movl %ecx, (%rax)
movss 8(%rsp), %xmm0
callq __Z3foo9DeclGroup
addq $24, %rsp
ret
vs this:
__Z3barP9DeclGroup:
subq $24, %rsp
movq %rdi, 16(%rsp)
movq 16(%rsp), %rdi
leaq 8(%rsp), %rax
movl (%rdi), %ecx
movl %ecx, (%rax)
movss 8(%rsp), %xmm0
movss %xmm0, (%rsp)
movsd (%rsp), %xmm0
callq __Z3foo9DeclGroup
addq $24, %rsp
ret
At -O3, it is the difference between this now:
__Z3barP9DeclGroup:
movss (%rdi), %xmm0
jmp __Z3foo9DeclGroup # TAILCALL
vs this before:
__Z3barP9DeclGroup:
movl (%rdi), %eax
movd %rax, %xmm0
jmp __Z3foo9DeclGroup # TAILCALL
llvm-svn: 107048
2010-06-29 03:56:59 +08:00
|
|
|
// Check for valid coercion. The struct should be passed/returned as i32, not
// as i64 for better code quality.
// rdar://8135035
// CHECK: define void @f18(i32 %a, i32 %f18_arg1.coerce)
struct f18_s0 { int f0; };
void f18(int a, struct f18_s0 f18_arg1) { while (1) {} }
|
2009-06-05 15:58:54 +08:00
|
|
|
|
2010-04-22 03:49:55 +08:00
|
|
|
// Check byval alignment.

// CHECK: define void @f19(%struct.s19* byval align 16 %x)
struct s19 {
  long double a;
};
void f19(struct s19 x) {}
|
|
|
|
|
|
|
|
// The struct's declared 32-byte alignment carries through to the byval slot.
// CHECK: define void @f20(%struct.s20* byval align 32 %x)
struct __attribute__((aligned(32))) s20 {
  int x;
  int y;
};
void f20(struct s20 x) {}
|
Change X86_64ABIInfo to have ASTContext and TargetData ivars to
avoid passing ASTContext down through all the methods it has.
When classifying an argument, or argument piece, as INTEGER, check
to see if we have a pointer at exactly the same offset in the
preferred type. If so, use that pointer type instead of i64. This
allows us to compile A function taking a stringref into something
like this:
define i8* @foo(i64 %D.coerce0, i8* %D.coerce1) nounwind ssp {
entry:
%D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=4]
%0 = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
store i64 %D.coerce0, i64* %0
%1 = getelementptr %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
store i8* %D.coerce1, i8** %1
%tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp1 = load i64* %tmp ; <i64> [#uses=1]
%tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
%tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
%add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
ret i8* %add.ptr
}
instead of this:
define i8* @foo(i64 %D.coerce0, i64 %D.coerce1) nounwind ssp {
entry:
%D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
%0 = insertvalue %0 undef, i64 %D.coerce0, 0 ; <%0> [#uses=1]
%1 = insertvalue %0 %0, i64 %D.coerce1, 1 ; <%0> [#uses=1]
%2 = bitcast %struct.DeclGroup* %D to %0* ; <%0*> [#uses=1]
store %0 %1, %0* %2, align 1
%tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp1 = load i64* %tmp ; <i64> [#uses=1]
%tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
%tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
%add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
ret i8* %add.ptr
}
This implements rdar://7375902 - [codegen quality] clang x86-64 ABI lowering code punishing StringRef
llvm-svn: 107123
2010-06-29 14:01:59 +08:00
|
|
|
|
|
|
|
struct StringRef {
  long x;
  const char *Ptr;
};

// The second eightbyte keeps its pointer type instead of decaying to i64.
// rdar://7375902
// CHECK: define i8* @f21(i64 %S.coerce0, i8* %S.coerce1)
const char *f21(struct StringRef S) { return S.x+S.Ptr; }
|
|
|
|
|
2010-07-06 04:21:00 +08:00
|
|
|
// PR7567
typedef __attribute__ ((aligned(16))) struct f22s { unsigned long long x[2]; } L;
void f22(L x, L y) { }
// CHECK: @f22
// CHECK: %x = alloca{{.*}}, align 16
// CHECK: %y = alloca{{.*}}, align 16
|
|
|
|
|
|
|
|
|
2010-07-29 06:15:08 +08:00
|
|
|
|
|
|
|
// PR7714
struct f23S {
  short f0;
  unsigned f1;
  int f2;
};

void f23(int A, struct f23S B) {
  // CHECK: define void @f23(i32 %A, i64 %B.coerce0, i32 %B.coerce1)
}

struct f24s { long a; int b; };

struct f23S f24(struct f23S *X, struct f24s *P2) {
  return *X;

  // CHECK: define %struct.f24s @f24(%struct.f23S* %X, %struct.f24s* %P2)
}
|
2010-07-29 06:15:08 +08:00
|
|
|
|
Kill off the 'coerce' ABI passing form. Now 'direct' and 'extend' always
have a "coerce to" type which often matches the default lowering of Clang
type to LLVM IR type, but the coerce case can be handled by making them
not be the same.
This simplifies things and fixes issues where X86-64 abi lowering would
return coerce after making preferred types exactly match up. This caused
us to compile:
typedef float v4f32 __attribute__((__vector_size__(16)));
v4f32 foo(v4f32 X) {
return X+X;
}
into this code at -O0:
define <4 x float> @foo(<4 x float> %X.coerce) nounwind {
entry:
%retval = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
%coerce = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
%X.addr = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
store <4 x float> %X.coerce, <4 x float>* %coerce
%X = load <4 x float>* %coerce ; <<4 x float>> [#uses=1]
store <4 x float> %X, <4 x float>* %X.addr
%tmp = load <4 x float>* %X.addr ; <<4 x float>> [#uses=1]
%tmp1 = load <4 x float>* %X.addr ; <<4 x float>> [#uses=1]
%add = fadd <4 x float> %tmp, %tmp1 ; <<4 x float>> [#uses=1]
store <4 x float> %add, <4 x float>* %retval
%0 = load <4 x float>* %retval ; <<4 x float>> [#uses=1]
ret <4 x float> %0
}
Now we get:
define <4 x float> @foo(<4 x float> %X) nounwind {
entry:
%X.addr = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
store <4 x float> %X, <4 x float>* %X.addr
%tmp = load <4 x float>* %X.addr ; <<4 x float>> [#uses=1]
%tmp1 = load <4 x float>* %X.addr ; <<4 x float>> [#uses=1]
%add = fadd <4 x float> %tmp, %tmp1 ; <<4 x float>> [#uses=1]
ret <4 x float> %add
}
This implements rdar://8248065
llvm-svn: 109733
2010-07-29 14:26:06 +08:00
|
|
|
// Vectors are passed and returned directly in SSE registers; no 'coerce'
// round-trip through memory should remain.
// rdar://8248065
typedef float v4f32 __attribute__((__vector_size__(16)));
v4f32 f25(v4f32 X) {
  // CHECK: define <4 x float> @f25(<4 x float> %X)
  // CHECK-NOT: alloca
  // CHECK: alloca <4 x float>
  // CHECK-NOT: alloca
  // CHECK: store <4 x float> %X, <4 x float>*
  // CHECK-NOT: store
  // CHECK: ret <4 x float>
  return X+X;
}
|
|
|
|
|
now that we have CGT around, we can start using preferred types
for return values too. Instead of compiling something like:
struct foo {
int *X;
float *Y;
};
struct foo test(struct foo *P) { return *P; }
to:
%1 = type { i64, i64 }
define %1 @test(%struct.foo* %P) nounwind {
entry:
%retval = alloca %struct.foo, align 8 ; <%struct.foo*> [#uses=2]
%P.addr = alloca %struct.foo*, align 8 ; <%struct.foo**> [#uses=2]
store %struct.foo* %P, %struct.foo** %P.addr
%tmp = load %struct.foo** %P.addr ; <%struct.foo*> [#uses=1]
%tmp1 = bitcast %struct.foo* %retval to i8* ; <i8*> [#uses=1]
%tmp2 = bitcast %struct.foo* %tmp to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 16, i32 8, i1 false)
%0 = bitcast %struct.foo* %retval to %1* ; <%1*> [#uses=1]
%1 = load %1* %0, align 1 ; <%1> [#uses=1]
ret %1 %1
}
We now get the result more type safe, with:
define %struct.foo @test(%struct.foo* %P) nounwind {
entry:
%retval = alloca %struct.foo, align 8 ; <%struct.foo*> [#uses=2]
%P.addr = alloca %struct.foo*, align 8 ; <%struct.foo**> [#uses=2]
store %struct.foo* %P, %struct.foo** %P.addr
%tmp = load %struct.foo** %P.addr ; <%struct.foo*> [#uses=1]
%tmp1 = bitcast %struct.foo* %retval to i8* ; <i8*> [#uses=1]
%tmp2 = bitcast %struct.foo* %tmp to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 16, i32 8, i1 false)
%0 = load %struct.foo* %retval ; <%struct.foo> [#uses=1]
ret %struct.foo %0
}
That memcpy is completely terrible, but I don't know how to fix it.
llvm-svn: 109729
2010-07-29 12:46:19 +08:00
|
|
|
// Two-pointer struct is returned as its own named type, keeping pointer types.
struct foo26 {
  int *X;
  float *Y;
};

struct foo26 f26(struct foo26 *P) {
  // CHECK: define %struct.foo26 @f26(%struct.foo26* %P)
  return *P;
}
|
2010-07-29 13:02:29 +08:00
|
|
|
|
|
|
|
|
|
|
|
struct v4f32wrapper {
|
|
|
|
v4f32 v;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct v4f32wrapper f27(struct v4f32wrapper X) {
|
|
|
|
// CHECK: define <4 x float> @f27(<4 x float> %X.coerce)
|
|
|
|
return X;
|
implement a todo: pass a eight-byte that consists of a
small integer + padding as that small integer. On code
like:
struct c { double x; int y; };
void bar(struct c C) { }
This means that we compile to:
define void @bar(double %C.coerce0, i32 %C.coerce1) nounwind {
entry:
%C = alloca %struct.c, align 8 ; <%struct.c*> [#uses=2]
%0 = getelementptr %struct.c* %C, i32 0, i32 0 ; <double*> [#uses=1]
store double %C.coerce0, double* %0
%1 = getelementptr %struct.c* %C, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 %C.coerce1, i32* %1
instead of:
define void @bar(double %C.coerce0, i64 %C.coerce1) nounwind {
entry:
%C = alloca %struct.c, align 8 ; <%struct.c*> [#uses=3]
%0 = bitcast %struct.c* %C to %0* ; <%0*> [#uses=2]
%1 = getelementptr %0* %0, i32 0, i32 0 ; <double*> [#uses=1]
store double %C.coerce0, double* %1
%2 = getelementptr %0* %0, i32 0, i32 1 ; <i64*> [#uses=1]
store i64 %C.coerce1, i64* %2
which gives SRoA heartburn.
This implements rdar://5711709, a nice low number :)
llvm-svn: 109737
2010-07-29 15:30:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// An eightbyte holding a small integer plus padding is passed as that small
// integer (i32), not widened to i64.
// rdar://5711709
struct f28c {
  double x;
  int y;
};
void f28(struct f28c C) {
  // CHECK: define void @f28(double %C.coerce0, i32 %C.coerce1)
}
|
|
|
|
|
This is a little bit far, but optimize cases like:
struct a {
struct c {
double x;
int y;
} x[1];
};
void foo(struct a A) {
}
into:
define void @foo(double %A.coerce0, i32 %A.coerce1) nounwind {
entry:
%A = alloca %struct.a, align 8 ; <%struct.a*> [#uses=1]
%0 = bitcast %struct.a* %A to %struct.c* ; <%struct.c*> [#uses=2]
%1 = getelementptr %struct.c* %0, i32 0, i32 0 ; <double*> [#uses=1]
store double %A.coerce0, double* %1
%2 = getelementptr %struct.c* %0, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 %A.coerce1, i32* %2
instead of:
define void @foo(double %A.coerce0, i64 %A.coerce1) nounwind {
entry:
%A = alloca %struct.a, align 8 ; <%struct.a*> [#uses=1]
%0 = bitcast %struct.a* %A to %0* ; <%0*> [#uses=2]
%1 = getelementptr %0* %0, i32 0, i32 0 ; <double*> [#uses=1]
store double %A.coerce0, double* %1
%2 = getelementptr %0* %0, i32 0, i32 1 ; <i64*> [#uses=1]
store i64 %A.coerce1, i64* %2
I only do this now because I never want to look at this code again :)
llvm-svn: 109738
2010-07-29 15:43:55 +08:00
|
|
|
// Same coercion applies through a one-element array of the nested struct.
struct f29a {
  struct c {
    double x;
    int y;
  } x[1];
};

void f29a(struct f29a A) {
  // CHECK: define void @f29a(double %A.coerce0, i32 %A.coerce1)
}
|