[sanitizer] allocator: introduce kUseSeparateSizeClassForBatch (false by default). When true, all TransferBatches are allocated from a separate dedicated size class, which improves security and may simplify memory reclamation. In its current state this may cause up to 3% extra memory usage; subsequent changes should bring this overhead down.

llvm-svn: 276377
Kostya Serebryany 2016-07-22 01:13:13 +00:00
parent be6328b078
commit bfd69e0580
3 changed files with 52 additions and 10 deletions
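Background for the change (an illustrative model, not code from the patch; the array size is an assumption): a TransferBatch carries allocator-trusted metadata, and when it is stored in-band, inside an ordinary size-class region, it sits directly next to user-controlled chunks, where a linear heap overflow can reach it.

using uptr = unsigned long;  // stand-in for the sanitizer's uptr

struct TransferBatch {
  TransferBatch *next;   // intrusive free-list pointer, trusted by the allocator
  uptr count;            // number of valid entries in batch[], trusted
  void *batch[62];       // cached chunk pointers handed back to user code
};

With kUseSeparateSizeClassForBatch, these objects instead come from a dedicated last size class (kBatchClassID), so batch metadata lives in a region disjoint from user data; that separation is also what would let a reclamation pass release user regions without walking in-band metadata.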


@@ -92,8 +92,8 @@ struct SizeClassAllocatorLocalCache {
   // For small size classes allocates the batch from the allocator.
   // For large size classes simply returns b.
   Batch *CreateBatch(uptr class_id, SizeClassAllocator *allocator, Batch *b) {
-    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
-      return (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
+    if (uptr batch_class_id = SizeClassMap::SizeClassForTransferBatch(class_id))
+      return (Batch*)Allocate(allocator, batch_class_id);
     return b;
   }
 
@@ -101,8 +101,8 @@ struct SizeClassAllocatorLocalCache {
   // For small size classes deallocates b to the allocator.
   // Does nothing for large size classes.
   void DestroyBatch(uptr class_id, SizeClassAllocator *allocator, Batch *b) {
-    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
-      Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
+    if (uptr batch_class_id = SizeClassMap::SizeClassForTransferBatch(class_id))
+      Deallocate(allocator, batch_class_id, b);
   }
 
   NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
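The two helpers above now share one contract: SizeClassForTransferBatch returns 0 when no separate batch allocation is needed (either the chunk itself can host the batch, or class_id is the batch class, which must not allocate its own batches from itself), and otherwise returns the size class to allocate the batch from. A condensed standalone sketch of that contract; the constants here are assumed, not the sanitizer's real values:

#include <cstdint>
using uptr = uintptr_t;

constexpr bool kUseSeparateSizeClassForBatch = true;  // assumed configuration
constexpr uptr kBatchClassID = 52;      // assumed: dedicated last class id
constexpr uptr kBatchSizeClassID = 5;   // assumed: ClassID(sizeof(TransferBatch))

// 0 means "store the batch in the chunk itself"; a non-zero value names the
// class that the TransferBatch object must be allocated from.
uptr SizeClassForTransferBatch(uptr class_id, bool chunk_fits_batch) {
  if (kUseSeparateSizeClassForBatch)
    return class_id == kBatchClassID ? 0 : kBatchClassID;  // no self-recursion
  return chunk_fits_batch ? 0 : kBatchSizeClassID;
}

Callers then use the `if (uptr batch_class_id = ...)` pattern from CreateBatch/DestroyBatch: the truthiness of the returned id doubles as the "needs a separate allocation" flag, which works because class 0 is never a valid allocation class.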


@@ -32,6 +32,9 @@
 // - kMaxNumCached is the maximal number of chunks per size class.
 // - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
 //
+// There is one extra size class kBatchClassID that is used for allocating
+// objects of TransferBatch type when kUseSeparateSizeClassForBatch is true.
+//
 // Part of output of SizeClassMap::Print():
 // c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
 // c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
@@ -97,11 +100,22 @@ class SizeClassMap {
     uptr count;
     void *batch[kMaxNumCached];
   };
 
-  COMPILER_CHECK((sizeof(TransferBatch) & (sizeof(TransferBatch) - 1)) == 0);
+  static const uptr kBatchSize = sizeof(TransferBatch);
+  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+
+  // If true, all TransferBatch objects are allocated from the kBatchClassID
+  // size class (except for those that are needed for kBatchClassID itself).
+  // The goal is to have TransferBatches in a totally different region of RAM
+  // to improve security and allow more efficient RAM reclamation.
+  // This is experimental and may currently increase memory usage by up to 3%
+  // in extreme cases.
+  static const bool kUseSeparateSizeClassForBatch = false;
+
   static const uptr kMaxSize = 1UL << kMaxSizeLog;
   static const uptr kNumClasses =
-      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
+  static const uptr kBatchClassID = kNumClasses - 1;
   COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
   static const uptr kNumClassesRounded =
       kNumClasses == 32 ? 32 :
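Two details in this hunk are easy to miss: kBatchSize must be a power of two so a TransferBatch maps exactly onto a size class, and the extra "+ 1" grows the class count by one slot, with kBatchClassID pinned to the last id. A standalone illustration with an assumed layout (2 header words plus kMaxNumCached = 62 pointers makes 64 words, a power of two on both 32- and 64-bit targets):

#include <cstdint>
using uptr = uintptr_t;

constexpr uptr kMaxNumCached = 62;  // assumed; 2 + 62 = 64 pointer-sized words
struct TransferBatch {
  TransferBatch *next;
  uptr count;
  void *batch[kMaxNumCached];
};
constexpr uptr kBatchSize = sizeof(TransferBatch);  // 512 bytes with 8-byte words
static_assert((kBatchSize & (kBatchSize - 1)) == 0,
              "x & (x - 1) clears the lowest set bit, so the result is 0 "
              "exactly when x is a power of two");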
@@ -111,6 +125,8 @@ class SizeClassMap {
   static uptr Size(uptr class_id) {
     if (class_id <= kMidClass)
       return kMinSize * class_id;
+    if (class_id == kBatchClassID)
+      return kBatchSize;
     class_id -= kMidClass;
     uptr t = kMidSize << (class_id >> S);
     return t + (t >> S) * (class_id & M);
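The branch added here short-circuits the geometric formula for the batch class, whose size is unrelated to the progression. For regular classes past kMidClass, the formula splits each power-of-two interval into (1 << S) even steps; a worked sketch with assumed parameters kMidSize = 256 and S = 2:

#include <cstdint>
using uptr = uintptr_t;

constexpr uptr S = 2;             // 1 << S = 4 steps per doubling (assumed)
constexpr uptr M = (1 << S) - 1;
constexpr uptr kMidSize = 256;    // assumed

// n = class_id - kMidClass, with n >= 1.
uptr SizeAfterMid(uptr n) {
  uptr t = kMidSize << (n >> S);  // enclosing power-of-two base
  return t + (t >> S) * (n & M);  // plus (n mod 4) quarter-steps
}
// n = 1..8 yields 320, 384, 448, 512, 640, 768, 896, 1024: each
// power-of-two range [t, 2t) is covered in four steps of t / 4.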
@@ -144,6 +160,8 @@ class SizeClassMap {
       uptr p = prev_s ? (d * 100 / prev_s) : 0;
       uptr l = s ? MostSignificantSetBitIndex(s) : 0;
       uptr cached = MaxCached(i) * s;
+      if (i == kBatchClassID)
+        d = l = p = 0;
       Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
              "cached: %zd %zd; id %zd\n",
              i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
@@ -153,18 +171,23 @@ class SizeClassMap {
     Printf("Total cached: %zd\n", total_cached);
   }
 
-  static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
-    return Size(class_id) < sizeof(TransferBatch) -
-        sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
+  static uptr SizeClassForTransferBatch(uptr class_id) {
+    if (kUseSeparateSizeClassForBatch)
+      return class_id == kBatchClassID ? 0 : kBatchClassID;
+    if (Size(class_id) < sizeof(TransferBatch) -
+        sizeof(uptr) * (kMaxNumCached - MaxCached(class_id)))
+      return ClassID(sizeof(TransferBatch));
+    return 0;
   }
 
   static void Validate() {
     for (uptr c = 1; c < kNumClasses; c++) {
+      if (c == kBatchClassID) continue;
       // Printf("Validate: c%zd\n", c);
       uptr s = Size(c);
       CHECK_NE(s, 0U);
       CHECK_EQ(ClassID(s), c);
-      if (c != kNumClasses - 1)
+      if (c != kBatchClassID - 1 && c != kNumClasses - 1)
         CHECK_EQ(ClassID(s + 1), c + 1);
       CHECK_EQ(ClassID(s - 1), c);
       if (c)
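Validate() needs both new exclusions because the batch class breaks the monotone size progression: Size(kBatchClassID) is kBatchSize rather than the next step up, so ClassID(Size(c) + 1) == c + 1 cannot hold when c + 1 is the batch class. A toy model with assumed sizes that makes the skipped cases visible:

#include <cassert>
#include <cstddef>

// Last entry plays the role of the batch class: out of sequence by design.
constexpr size_t kSizes[] = {0, 16, 32, 48, 64, 512};
constexpr size_t kBatchClassID = sizeof(kSizes) / sizeof(kSizes[0]) - 1;

size_t ClassID(size_t size) {
  // Smallest regular class that fits `size`; the batch class is never returned.
  for (size_t c = 1; c < kBatchClassID; c++)
    if (size <= kSizes[c]) return c;
  return 0;
}

int main() {
  for (size_t c = 1; c < kBatchClassID; c++) {  // skip the batch class itself
    assert(ClassID(kSizes[c]) == c);            // round-trip holds for regular classes
    if (c + 1 < kBatchClassID)                  // c == kBatchClassID - 1 is skipped:
      assert(ClassID(kSizes[c] + 1) == c + 1);  // its "next size" is the batch class
  }
}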


@@ -60,6 +60,25 @@ static void thread_dtor(void *v) {
   allocator.SwallowCache(&cache);
 }
 
+static size_t GetRss() {
+  if (FILE *f = fopen("/proc/self/statm", "r")) {
+    size_t size = 0, rss = 0;
+    fscanf(f, "%zd %zd", &size, &rss);
+    fclose(f);
+    return rss << 12;  // rss is in pages.
+  }
+  return 0;
+}
+
+struct AtExit {
+  ~AtExit() {
+    allocator.PrintStats();
+    Printf("RSS: %zdM\n", GetRss() >> 20);
+  }
+};
+
+static AtExit at_exit;
+
 static void NOINLINE thread_init() {
   if (!global_inited) {
     global_inited = true;
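One caveat in the new test helper: /proc/self/statm reports sizes in pages, and `rss << 12` hard-codes 4 KiB pages, which is fine for the test environments this targets. A sketch of a more portable variant (not part of the commit) that queries the page size and also checks the fscanf result:

#include <cstdio>
#include <unistd.h>

static size_t GetRssPortable() {
  if (FILE *f = fopen("/proc/self/statm", "r")) {
    size_t size = 0, rss = 0;
    if (fscanf(f, "%zu %zu", &size, &rss) != 2)
      rss = 0;  // parse failure: report 0 rather than garbage
    fclose(f);
    return rss * static_cast<size_t>(sysconf(_SC_PAGESIZE));
  }
  return 0;
}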