now that we have CGT around, we can start using preferred types
for return values too.  Instead of compiling something like:

struct foo { int *X; float *Y; };

struct foo test(struct foo *P) { return *P; }

to:

%1 = type { i64, i64 }

define %1 @test(%struct.foo* %P) nounwind {
entry:
  %retval = alloca %struct.foo, align 8           ; <%struct.foo*> [#uses=2]
  %P.addr = alloca %struct.foo*, align 8          ; <%struct.foo**> [#uses=2]
  store %struct.foo* %P, %struct.foo** %P.addr
  %tmp = load %struct.foo** %P.addr               ; <%struct.foo*> [#uses=1]
  %tmp1 = bitcast %struct.foo* %retval to i8*     ; <i8*> [#uses=1]
  %tmp2 = bitcast %struct.foo* %tmp to i8*        ; <i8*> [#uses=1]
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 16, i32 8, i1 false)
  %0 = bitcast %struct.foo* %retval to %1*        ; <%1*> [#uses=1]
  %1 = load %1* %0, align 1                       ; <%1> [#uses=1]
  ret %1 %1
}

We now get the result more type safe, with:

define %struct.foo @test(%struct.foo* %P) nounwind {
entry:
  %retval = alloca %struct.foo, align 8           ; <%struct.foo*> [#uses=2]
  %P.addr = alloca %struct.foo*, align 8          ; <%struct.foo**> [#uses=2]
  store %struct.foo* %P, %struct.foo** %P.addr
  %tmp = load %struct.foo** %P.addr               ; <%struct.foo*> [#uses=1]
  %tmp1 = bitcast %struct.foo* %retval to i8*     ; <i8*> [#uses=1]
  %tmp2 = bitcast %struct.foo* %tmp to i8*        ; <i8*> [#uses=1]
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 16, i32 8, i1 false)
  %0 = load %struct.foo* %retval                  ; <%struct.foo> [#uses=1]
  ret %struct.foo %0
}

That memcpy is completely terrible, but I don't know how to fix it.

llvm-svn: 109729
This commit is contained in:
parent: 029c0f1681
commit: 3a44c7e55d
@@ -1243,6 +1243,7 @@ classifyReturnType(QualType RetTy) const {
   assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
 
+  const llvm::Type *IRType = 0;
   const llvm::Type *ResType = 0;
   switch (Lo) {
   case NoClass:
@@ -1260,7 +1261,10 @@ classifyReturnType(QualType RetTy) const {
     // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
     // available register of the sequence %rax, %rdx is used.
   case Integer:
-    ResType = Get8ByteTypeAtOffset(0, 0, RetTy, 0);
+    if (IRType == 0)
+      IRType = CGT.ConvertTypeRecursive(RetTy);
+
+    ResType = Get8ByteTypeAtOffset(IRType, 0, RetTy, 0);
     break;
 
     // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
@@ -1299,7 +1303,10 @@ classifyReturnType(QualType RetTy) const {
     break;
 
   case Integer: {
-    const llvm::Type *HiType = Get8ByteTypeAtOffset(0, 8, RetTy, 8);
+    if (IRType == 0)
+      IRType = CGT.ConvertTypeRecursive(RetTy);
+
+    const llvm::Type *HiType = Get8ByteTypeAtOffset(IRType, 8, RetTy, 8);
     ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL);
     break;
   }
@@ -1456,7 +1463,6 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
 
 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
 
-  // Pass preferred type into classifyReturnType.
   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
 
   // Keep track of the number of assigned registers.
@@ -159,4 +159,12 @@ v4f32 f25(v4f32 X) {
   return X+X;
 }
 
+struct foo26 {
+  int *X;
+  float *Y;
+};
+
+struct foo26 f26(struct foo26 *P) {
+  // CHECK: define %struct.foo26 @f26(%struct.foo26* %P)
+  return *P;
+}
@@ -9,7 +9,7 @@ struct Coerce coerce_func(void);
 void Coerce_test(void) {
   struct Coerce c;
 
-  // CHECK: call i64 @coerce_func
+  // CHECK: call i8* @coerce_func
   // CHECK: call i8* @objc_memmove_collectable(
   c = coerce_func();
 }
Loading…
Reference in New Issue