[checked-arithmetic builtins] Added builtins that let users perform checked arithmetic in C.

This enables users in security-critical applications to perform checked
arithmetic in a fast, safe manner that is easily expressed in C.

Tests and an update to the Language Extensions documentation are included as well.

rdar://13421498.

llvm-svn: 184497
Michael Gottesman 2013-06-20 23:28:10 +00:00
parent 72ae1736b3
commit 930ecdb77b
5 changed files with 311 additions and 0 deletions


@@ -1566,6 +1566,49 @@ The complete list of builtins are:
unsigned long __builtin_subcl (unsigned long x, unsigned long y, unsigned long carryin, unsigned long *carryout);
unsigned long long __builtin_subcll(unsigned long long x, unsigned long long y, unsigned long long carryin, unsigned long long *carryout);
Checked Arithmetic Builtins
---------------------------

Clang provides a set of builtins that implement checked arithmetic for
security-critical applications in a manner that is fast and easily expressible
in C. As an example of their usage:

.. code-block:: c

  errorcode_t security_critical_application(...) {
    unsigned x, y, result;
    ...
    if (__builtin_umul_overflow(x, y, &result))
      return kErrorCodeHackers;
    ...
    use_multiply(result);
    ...
  }

A complete enumeration of the builtins is:

.. code-block:: c

  bool __builtin_uadd_overflow  (unsigned x, unsigned y, unsigned *sum);
  bool __builtin_uaddl_overflow (unsigned long x, unsigned long y, unsigned long *sum);
  bool __builtin_uaddll_overflow(unsigned long long x, unsigned long long y, unsigned long long *sum);
  bool __builtin_usub_overflow  (unsigned x, unsigned y, unsigned *diff);
  bool __builtin_usubl_overflow (unsigned long x, unsigned long y, unsigned long *diff);
  bool __builtin_usubll_overflow(unsigned long long x, unsigned long long y, unsigned long long *diff);
  bool __builtin_umul_overflow  (unsigned x, unsigned y, unsigned *prod);
  bool __builtin_umull_overflow (unsigned long x, unsigned long y, unsigned long *prod);
  bool __builtin_umulll_overflow(unsigned long long x, unsigned long long y, unsigned long long *prod);
  bool __builtin_sadd_overflow  (int x, int y, int *sum);
  bool __builtin_saddl_overflow (long x, long y, long *sum);
  bool __builtin_saddll_overflow(long long x, long long y, long long *sum);
  bool __builtin_ssub_overflow  (int x, int y, int *diff);
  bool __builtin_ssubl_overflow (long x, long y, long *diff);
  bool __builtin_ssubll_overflow(long long x, long long y, long long *diff);
  bool __builtin_smul_overflow  (int x, int y, int *prod);
  bool __builtin_smull_overflow (long x, long y, long *prod);
  bool __builtin_smulll_overflow(long long x, long long y, long long *prod);
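
The signed variants behave analogously: the value stored through the third
argument is the wrapped result, and the return value indicates whether the
mathematical result was representable in the destination type. A minimal
sketch of using a signed variant follows (the helper function and its error
convention are illustrative, not part of the builtin interface):

.. code-block:: c

  /* Hypothetical helper: add a signed delta to a counter, reporting failure
     instead of invoking undefined behavior on signed overflow. */
  int adjust_counter(int counter, int delta, int *out) {
    int updated;
    if (__builtin_sadd_overflow(counter, delta, &updated))
      return -1;     /* overflow detected; *out is left untouched */
    *out = updated;  /* in-range result */
    return 0;
  }
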
.. _langext-__c11_atomic:
__c11_atomic builtins


@@ -64,6 +64,8 @@ New Compiler Flags
C Language Changes in Clang
---------------------------

- Added new checked arithmetic builtins for security-critical applications.

C11 Feature Support
^^^^^^^^^^^^^^^^^^^


@@ -939,5 +939,25 @@ BUILTIN(__builtin_subc, "UiUiCUiCUiCUi*", "n")
BUILTIN(__builtin_subcl, "ULiULiCULiCULiCULi*", "n")
BUILTIN(__builtin_subcll, "ULLiULLiCULLiCULLiCULLi*", "n")
// Checked Arithmetic Builtins for Security.
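// (Prototype strings follow this file's type encoding: in "bUiCUiCUi*", for
// example, 'b' is the bool return type, each "UiC" is a const unsigned int
// argument, and "Ui*" is a pointer to unsigned int; the "n" attribute marks
// the builtin nothrow.)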
BUILTIN(__builtin_uadd_overflow, "bUiCUiCUi*", "n")
BUILTIN(__builtin_uaddl_overflow, "bULiCULiCULi*", "n")
BUILTIN(__builtin_uaddll_overflow, "bULLiCULLiCULLi*", "n")
BUILTIN(__builtin_usub_overflow, "bUiCUiCUi*", "n")
BUILTIN(__builtin_usubl_overflow, "bULiCULiCULi*", "n")
BUILTIN(__builtin_usubll_overflow, "bULLiCULLiCULLi*", "n")
BUILTIN(__builtin_umul_overflow, "bUiCUiCUi*", "n")
BUILTIN(__builtin_umull_overflow, "bULiCULiCULi*", "n")
BUILTIN(__builtin_umulll_overflow, "bULLiCULLiCULLi*", "n")
BUILTIN(__builtin_sadd_overflow, "bSiCSiCSi*", "n")
BUILTIN(__builtin_saddl_overflow, "bSLiCSLiCSLi*", "n")
BUILTIN(__builtin_saddll_overflow, "bSLLiCSLLiCSLLi*", "n")
BUILTIN(__builtin_ssub_overflow, "bSiCSiCSi*", "n")
BUILTIN(__builtin_ssubl_overflow, "bSLiCSLiCSLi*", "n")
BUILTIN(__builtin_ssubll_overflow, "bSLLiCSLLiCSLLi*", "n")
BUILTIN(__builtin_smul_overflow, "bSiCSiCSi*", "n")
BUILTIN(__builtin_smull_overflow, "bSLiCSLiCSLi*", "n")
BUILTIN(__builtin_smulll_overflow, "bSLLiCSLLiCSLLi*", "n")
#undef BUILTIN
#undef LIBBUILTIN


@@ -1414,6 +1414,77 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
CarryOutStore->setAlignment(CarryOutPtr.second);
return RValue::get(Sum2);
}
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow: {
// We translate all of these builtins directly to the relevant llvm IR node.
// Scalarize our inputs.
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
std::pair<llvm::Value *, unsigned> SumOutPtr =
EmitPointerWithAlignment(E->getArg(2));
// Decide which of the overflow intrinsics we are lowering to:
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown security overflow builtin id.");
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
IntrinsicId = llvm::Intrinsic::usub_with_overflow;
break;
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
IntrinsicId = llvm::Intrinsic::umul_with_overflow;
break;
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
break;
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
break;
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow:
IntrinsicId = llvm::Intrinsic::smul_with_overflow;
break;
}
llvm::Value *Carry;
llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
SumOutStore->setAlignment(SumOutPtr.second);
return RValue::get(Carry);
}
case Builtin::BI__noop:
return RValue::get(0);
}
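
In C terms, the lowering above stores the wrapped result through the pointer
argument and returns the overflow bit. A minimal model of the unsigned-add
case, for illustration only (the real builtin compiles to a single
llvm.uadd.with.overflow call rather than this open-coded check):

#include <stdbool.h>

/* Illustrative model of __builtin_uadd_overflow: the wrapped sum is always
   written to *sum, and the return value reports whether the addition
   overflowed. For unsigned addition, the wrapped sum is smaller than either
   operand exactly when overflow occurred. */
static bool uadd_overflow_model(unsigned x, unsigned y, unsigned *sum) {
  *sum = x + y;    /* unsigned arithmetic wraps modulo 2^N */
  return *sum < x; /* the sum wrapped below x iff overflow happened */
}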


@@ -0,0 +1,175 @@
// Test CodeGen for Security Check Overflow Builtins.
// rdar://13421498
// RUN: %clang_cc1 -triple "i686-unknown-unknown" -emit-llvm -x c %s -o - -O0 | FileCheck %s
// RUN: %clang_cc1 -triple "x86_64-unknown-unknown" -emit-llvm -x c %s -o - -O0 | FileCheck %s
// RUN: %clang_cc1 -triple "x86_64-mingw32" -emit-llvm -x c %s -o - -O0 | FileCheck %s
extern unsigned UnsignedErrorCode;
extern unsigned long UnsignedLongErrorCode;
extern unsigned long long UnsignedLongLongErrorCode;
extern int IntErrorCode;
extern long LongErrorCode;
extern long long LongLongErrorCode;
unsigned test_uadd_overflow(unsigned x, unsigned y) {
// CHECK: @test_uadd_overflow
// CHECK: %{{.+}} = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
unsigned result;
if (__builtin_uadd_overflow(x, y, &result))
return UnsignedErrorCode;
return result;
}
unsigned long test_uaddl_overflow(unsigned long x, unsigned long y) {
// CHECK: @test_uaddl_overflow([[UL:i32|i64]] %x
// CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.uadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
unsigned long result;
if (__builtin_uaddl_overflow(x, y, &result))
return UnsignedLongErrorCode;
return result;
}
unsigned long long test_uaddll_overflow(unsigned long long x, unsigned long long y) {
// CHECK: @test_uaddll_overflow
// CHECK: %{{.+}} = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
unsigned long long result;
if (__builtin_uaddll_overflow(x, y, &result))
return UnsignedLongLongErrorCode;
return result;
}
unsigned test_usub_overflow(unsigned x, unsigned y) {
// CHECK: @test_usub_overflow
// CHECK: %{{.+}} = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
unsigned result;
if (__builtin_usub_overflow(x, y, &result))
return UnsignedErrorCode;
return result;
}
unsigned long test_usubl_overflow(unsigned long x, unsigned long y) {
// CHECK: @test_usubl_overflow([[UL:i32|i64]] %x
// CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
unsigned long result;
if (__builtin_usubl_overflow(x, y, &result))
return UnsignedLongErrorCode;
return result;
}
unsigned long long test_usubll_overflow(unsigned long long x, unsigned long long y) {
// CHECK: @test_usubll_overflow
// CHECK: %{{.+}} = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
unsigned long long result;
if (__builtin_usubll_overflow(x, y, &result))
return UnsignedLongLongErrorCode;
return result;
}
unsigned test_umul_overflow(unsigned x, unsigned y) {
// CHECK: @test_umul_overflow
// CHECK: %{{.+}} = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
unsigned result;
if (__builtin_umul_overflow(x, y, &result))
return UnsignedErrorCode;
return result;
}
unsigned long test_umull_overflow(unsigned long x, unsigned long y) {
// CHECK: @test_umull_overflow([[UL:i32|i64]] %x
// CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.umul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
unsigned long result;
if (__builtin_umull_overflow(x, y, &result))
return UnsignedLongErrorCode;
return result;
}
unsigned long long test_umulll_overflow(unsigned long long x, unsigned long long y) {
// CHECK: @test_umulll_overflow
// CHECK: %{{.+}} = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
unsigned long long result;
if (__builtin_umulll_overflow(x, y, &result))
return UnsignedLongLongErrorCode;
return result;
}
int test_sadd_overflow(int x, int y) {
// CHECK: @test_sadd_overflow
// CHECK: %{{.+}} = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
int result;
if (__builtin_sadd_overflow(x, y, &result))
return IntErrorCode;
return result;
}
long test_saddl_overflow(long x, long y) {
// CHECK: @test_saddl_overflow([[UL:i32|i64]] %x
// CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.sadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
long result;
if (__builtin_saddl_overflow(x, y, &result))
return LongErrorCode;
return result;
}
long long test_saddll_overflow(long long x, long long y) {
// CHECK: @test_saddll_overflow
// CHECK: %{{.+}} = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
long long result;
if (__builtin_saddll_overflow(x, y, &result))
return LongLongErrorCode;
return result;
}
int test_ssub_overflow(int x, int y) {
// CHECK: @test_ssub_overflow
// CHECK: %{{.+}} = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
int result;
if (__builtin_ssub_overflow(x, y, &result))
return IntErrorCode;
return result;
}
long test_ssubl_overflow(long x, long y) {
// CHECK: @test_ssubl_overflow([[UL:i32|i64]] %x
// CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.ssub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
long result;
if (__builtin_ssubl_overflow(x, y, &result))
return LongErrorCode;
return result;
}
long long test_ssubll_overflow(long long x, long long y) {
// CHECK: @test_ssubll_overflow
// CHECK: %{{.+}} = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
long long result;
if (__builtin_ssubll_overflow(x, y, &result))
return LongLongErrorCode;
return result;
}
int test_smul_overflow(int x, int y) {
// CHECK: @test_smul_overflow
// CHECK: %{{.+}} = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
int result;
if (__builtin_smul_overflow(x, y, &result))
return IntErrorCode;
return result;
}
long test_smull_overflow(long x, long y) {
// CHECK: @test_smull_overflow([[UL:i32|i64]] %x
// CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.smul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
long result;
if (__builtin_smull_overflow(x, y, &result))
return LongErrorCode;
return result;
}
long long test_smulll_overflow(long long x, long long y) {
// CHECK: @test_smulll_overflow
// CHECK: %{{.+}} = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
long long result;
if (__builtin_smulll_overflow(x, y, &result))
return LongLongErrorCode;
return result;
}