[scudo] Get rid of the thread local PRNG & header salt

Summary:
It was deemed that the salt in the chunk header didn't improve security
significantly (and could actually decrease it). The initial idea was that the
same chunk would have different headers on different allocations, allowing for
less predictability. The issue is that gathering headers for the same chunk
with different salts can give information about the other "secrets" (the
cookie, the pointer), and that if an attacker leaks a header, they can reuse it
for that same chunk anyway since we don't enforce the salt value.

So we get rid of the salt in the header. This means we can also get rid of the
thread local Prng, and that we no longer need a global Prng either. This makes
everything faster.
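For illustration only, here is a minimal standalone sketch of how the cookie
can now be seeded once at init without any Prng. The in-tree code uses
sanitizer_common's GetRandom() and NanoTime(); the /dev/urandom and <chrono>
usage below is just a stand-in for those:

    #include <chrono>
    #include <cstdint>
    #include <fstream>

    static uint32_t Cookie;  // 32-bit global cookie mixed into header checksums.

    static void initCookie() {
      // Preferred path: read the cookie from the system random source.
      std::ifstream Urandom("/dev/urandom", std::ios::binary);
      if (Urandom.read(reinterpret_cast<char *>(&Cookie), sizeof(Cookie)))
        return;
      // Fallback for early processes without a usable random pool: mix the
      // current time with an address. Weaker, but such processes are less
      // likely targets of heap exploitation attempts.
      const uint64_t Nanos = static_cast<uint64_t>(
          std::chrono::duration_cast<std::chrono::nanoseconds>(
              std::chrono::steady_clock::now().time_since_epoch()).count());
      Cookie = static_cast<uint32_t>(
          (Nanos >> 12) ^ (reinterpret_cast<uintptr_t>(&Cookie) >> 4));
    }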

We reuse those 8 bits to store the `ClassId` of a chunk now (0 for a Secondary
based allocation), as sketched below. This way, we get some additional speed
gains:
- `ClassId` is computed outside of the locked block;
- `getActuallyAllocatedSize` doesn't need the `GetSizeClass` call anymore;
- same for `deallocatePrimary`.
We add a sanity check at init for this new field (all sanity checks are moved
into their own function, as `init` was getting crowded).
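As a rough sketch (field names and widths are taken from the header change
further down; the struct name and the static_assert are only illustrative),
the reworked 64-bit header now looks like this, with the 8 former Salt bits
holding ClassId and SizeOrUnusedBytes growing from 19 to 20 bits:

    #include <cstdint>

    struct UnpackedHeaderSketch {
      uint64_t Checksum          : 16;
      uint64_t ClassId           : 8;   // 0 means the chunk is Secondary backed.
      uint64_t SizeOrUnusedBytes : 20;  // Size (Primary) or unused bytes (Secondary).
      uint64_t State             : 2;   // Available, allocated, or quarantined.
      uint64_t AllocType         : 2;   // malloc, new, new[], or memalign.
      uint64_t Offset            : 16;  // Offset to the backend allocation start.
    };
    static_assert(sizeof(UnpackedHeaderSketch) == sizeof(uint64_t),
                  "the header must still pack into a single u64");

With the class ID directly available from the header, a deallocation can be
routed to the Primary or Secondary and sized without a `GetSizeClass` lookup,
which is where the speed gains listed above come from.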

Reviewers: alekseyshl, flowerhack

Reviewed By: alekseyshl

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D40796

llvm-svn: 319791
Kostya Kortchinsky 2017-12-05 17:08:29 +00:00
parent 6e9ea3f55a
commit df6ba242bf
5 changed files with 63 additions and 115 deletions


@@ -30,7 +30,7 @@
 namespace __scudo {

 // Global static cookie, initialized at start-up.
-static uptr Cookie;
+static u32 Cookie;

 // We default to software CRC32 if the alternatives are not supported, either
 // at compilation or at runtime.
@@ -66,7 +66,7 @@ struct ScudoChunk : UnpackedHeader {
   // We can't use the offset member of the chunk itself, as we would double
   // fetch it without any warranty that it wouldn't have been tampered. To
   // prevent this, we work with a local copy of the header.
-  void *getAllocBeg(UnpackedHeader *Header) {
+  void *getBackendPtr(UnpackedHeader *Header) {
     return reinterpret_cast<void *>(
         reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
   }
@@ -74,9 +74,9 @@ struct ScudoChunk : UnpackedHeader {
   // Returns the usable size for a chunk, meaning the amount of bytes from the
   // beginning of the user data to the end of the backend allocated chunk.
   uptr getUsableSize(UnpackedHeader *Header) {
-    uptr Size =
-        getBackendAllocator().getActuallyAllocatedSize(getAllocBeg(Header),
-                                                       Header->FromPrimary);
+    const uptr Size =
+        getBackendAllocator().getActuallyAllocatedSize(getBackendPtr(Header),
+                                                       Header->ClassId);
     if (Size == 0)
       return 0;
     return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
@@ -88,8 +88,7 @@ struct ScudoChunk : UnpackedHeader {
     ZeroChecksumHeader.Checksum = 0;
     uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
     memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
-    u32 Crc = computeCRC32(static_cast<u32>(Cookie),
-                           reinterpret_cast<uptr>(this), HeaderHolder,
+    u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HeaderHolder,
                            ARRAY_SIZE(HeaderHolder));
     return static_cast<u16>(Crc);
   }
@@ -176,9 +175,9 @@ struct QuarantineCallback {
                      Chunk);
     }
     Chunk->eraseHeader();
-    void *Ptr = Chunk->getAllocBeg(&Header);
-    if (Header.FromPrimary)
-      getBackendAllocator().deallocatePrimary(Cache_, Ptr);
+    void *Ptr = Chunk->getBackendPtr(&Header);
+    if (Header.ClassId)
+      getBackendAllocator().deallocatePrimary(Cache_, Ptr, Header.ClassId);
     else
       getBackendAllocator().deallocateSecondary(Ptr);
   }
@@ -186,16 +185,17 @@ struct QuarantineCallback {
   // Internal quarantine allocation and deallocation functions. We first check
   // that the batches are indeed serviced by the Primary.
   // TODO(kostyak): figure out the best way to protect the batches.
-  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
   void *Allocate(uptr Size) {
-    return getBackendAllocator().allocatePrimary(Cache_, Size);
+    return getBackendAllocator().allocatePrimary(Cache_, BatchClassId);
   }

   void Deallocate(void *Ptr) {
-    getBackendAllocator().deallocatePrimary(Cache_, Ptr);
+    getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId);
   }

   AllocatorCache *Cache_;
+  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
+  const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
 };

 typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
@@ -217,9 +217,6 @@ struct ScudoAllocator {
   ScudoBackendAllocator BackendAllocator;
   ScudoQuarantine AllocatorQuarantine;

-  StaticSpinMutex GlobalPrngMutex;
-  ScudoPrng GlobalPrng;
-
   u32 QuarantineChunksUpToSize;

   bool DeallocationTypeMismatch;
@@ -235,10 +232,7 @@ struct ScudoAllocator {
   explicit ScudoAllocator(LinkerInitialized)
       : AllocatorQuarantine(LINKER_INITIALIZED) {}

-  void init() {
-    SanitizerToolName = "Scudo";
-    initFlags();
-
+  void performSanityChecks() {
     // Verify that the header offset field can hold the maximum offset. In the
     // case of the Secondary allocator, it takes care of alignment and the
     // offset will always be 0. In the case of the Primary, the worst case
@@ -248,9 +242,9 @@ struct ScudoAllocator {
     // result, the maximum offset will be at most the maximum alignment for the
     // last size class minus the header size, in multiples of MinAlignment.
     UnpackedHeader Header = {};
-    uptr MaxPrimaryAlignment =
+    const uptr MaxPrimaryAlignment =
         1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
-    uptr MaxOffset =
+    const uptr MaxOffset =
         (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
     Header.Offset = MaxOffset;
     if (Header.Offset != MaxOffset) {
@@ -262,13 +256,26 @@ struct ScudoAllocator {
     // case scenario happens in the Primary. It will depend on the second to
     // last and last class sizes, as well as the dynamic base for the Primary.
     // The following is an over-approximation that works for our needs.
-    uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
+    const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
     Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
     if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
       dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                      "the header\n");
     }
+
+    const uptr LargestClassId = SizeClassMap::kLargestClassID;
+    Header.ClassId = LargestClassId;
+    if (Header.ClassId != LargestClassId) {
+      dieWithMessage("ERROR: the largest class ID doesn't fit in the header\n");
+    }
+  }
+
+  void init() {
+    SanitizerToolName = "Scudo";
+    initFlags();
+
+    performSanityChecks();
+
     // Check if hardware CRC32 is supported in the binary and by the platform,
     // if so, opt for the CRC32 hardware version of the checksum.
     if (&computeHardwareCRC32 && hasHardwareCRC32())
@@ -286,8 +293,11 @@ struct ScudoAllocator {
     DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
     ZeroContents = getFlags()->ZeroContents;

-    GlobalPrng.init();
-    Cookie = GlobalPrng.getU64();
+    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
+                            /*blocking=*/false))) {
+      Cookie = static_cast<u32>((NanoTime() >> 12) ^
+                                (reinterpret_cast<uptr>(this) >> 4));
+    }

     CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
     if (CheckRssLimit)
@@ -365,23 +375,21 @@ struct ScudoAllocator {
     // Primary and Secondary backed allocations have a different treatment. We
     // deal with alignment requirements of Primary serviced allocations here,
     // but the Secondary will take care of its own alignment needs.
-    bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);
+    const bool FromPrimary =
+        PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);

     void *Ptr;
-    u8 Salt;
+    u8 ClassId;
     uptr AllocSize;
     if (FromPrimary) {
       AllocSize = AlignedSize;
+      ClassId = SizeClassMap::ClassID(AllocSize);
       ScudoTSD *TSD = getTSDAndLock();
-      Salt = TSD->Prng.getU8();
-      Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
+      Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId);
       TSD->unlock();
     } else {
-      {
-        SpinMutexLock l(&GlobalPrngMutex);
-        Salt = GlobalPrng.getU8();
-      }
       AllocSize = NeededSize;
+      ClassId = 0;
       Ptr = BackendAllocator.allocateSecondary(AllocSize, Alignment);
     }
     if (UNLIKELY(!Ptr))
@@ -389,26 +397,25 @@ struct ScudoAllocator {
     // If requested, we will zero out the entire contents of the returned chunk.
     if ((ForceZeroContents || ZeroContents) && FromPrimary)
-      memset(Ptr, 0, BackendAllocator.getActuallyAllocatedSize(
-          Ptr, /*FromPrimary=*/true));
+      memset(Ptr, 0, BackendAllocator.getActuallyAllocatedSize(Ptr, ClassId));

     UnpackedHeader Header = {};
-    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
-    uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
+    uptr BackendPtr = reinterpret_cast<uptr>(Ptr);
+    uptr UserBeg = BackendPtr + AlignedChunkHeaderSize;
     if (UNLIKELY(!IsAligned(UserBeg, Alignment))) {
       // Since the Secondary takes care of alignment, a non-aligned pointer
       // means it is from the Primary. It is also the only case where the offset
       // field of the header would be non-zero.
       CHECK(FromPrimary);
       UserBeg = RoundUpTo(UserBeg, Alignment);
-      uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
+      uptr Offset = UserBeg - AlignedChunkHeaderSize - BackendPtr;
       Header.Offset = Offset >> MinAlignmentLog;
     }
-    CHECK_LE(UserBeg + Size, AllocBeg + AllocSize);
+    CHECK_LE(UserBeg + Size, BackendPtr + AllocSize);
+    Header.ClassId = ClassId;
     Header.State = ChunkAllocated;
     Header.AllocType = Type;
     if (FromPrimary) {
-      Header.FromPrimary = 1;
       Header.SizeOrUnusedBytes = Size;
     } else {
       // The secondary fits the allocations to a page, so the amount of unused
@@ -419,7 +426,6 @@ struct ScudoAllocator {
       if (TrailingBytes)
         Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
     }
-    Header.Salt = Salt;
     getScudoChunk(UserBeg)->storeHeader(&Header);
     void *UserPtr = reinterpret_cast<void *>(UserBeg);
     // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
@@ -435,10 +441,11 @@ struct ScudoAllocator {
                            (Size > QuarantineChunksUpToSize);
     if (BypassQuarantine) {
       Chunk->eraseHeader();
-      void *Ptr = Chunk->getAllocBeg(Header);
-      if (Header->FromPrimary) {
+      void *Ptr = Chunk->getBackendPtr(Header);
+      if (Header->ClassId) {
         ScudoTSD *TSD = getTSDAndLock();
-        getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
+        getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr,
+                                                Header->ClassId);
         TSD->unlock();
       } else {
         getBackendAllocator().deallocateSecondary(Ptr);
@@ -496,7 +503,7 @@ struct ScudoAllocator {
         }
       }
     }
-    uptr Size = Header.FromPrimary ? Header.SizeOrUnusedBytes :
+    uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
         Chunk->getUsableSize(&Header) - Header.SizeOrUnusedBytes;
     if (DeleteSizeMismatch) {
       if (DeleteSize && DeleteSize != Size) {
@@ -536,7 +543,7 @@ struct ScudoAllocator {
         (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
       UnpackedHeader NewHeader = OldHeader;
       NewHeader.SizeOrUnusedBytes =
-          OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
+          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
       Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
       return OldPtr;
     }
@@ -544,7 +551,7 @@ struct ScudoAllocator {
     // old one.
     void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
     if (NewPtr) {
-      uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
+      uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
           UsableSize - OldHeader.SizeOrUnusedBytes;
       memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
       quarantineOrDeallocateChunk(Chunk, &OldHeader, OldSize);
@@ -608,7 +615,6 @@ void initScudo() {
 void ScudoTSD::init(bool Shared) {
   UnlockRequired = Shared;
   getBackendAllocator().initCache(&Cache);
-  Prng.init();
   memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
 }


@@ -39,16 +39,15 @@ enum ChunkState : u8 {
 typedef u64 PackedHeader;
 struct UnpackedHeader {
   u64 Checksum          : 16;
-  u64 SizeOrUnusedBytes : 19;  // Size for Primary backed allocations, amount of
+  u64 ClassId           : 8;
+  u64 SizeOrUnusedBytes : 20;  // Size for Primary backed allocations, amount of
                                // unused bytes in the chunk for Secondary ones.
-  u64 FromPrimary       : 1;
   u64 State             : 2;   // available, allocated, or quarantined
   u64 AllocType         : 2;   // malloc, new, new[], or memalign
   u64 Offset            : 16;  // Offset from the beginning of the backend
                                // allocation to the beginning of the chunk
                                // itself, in multiples of MinAlignment. See
                                // comment about its maximum value and in init().
-  u64 Salt              : 8;
 };

 typedef atomic_uint64_t AtomicPackedHeader;


@@ -31,8 +31,8 @@ class ScudoCombinedAllocator {
   // Primary allocations are always MinAlignment aligned, and as such do not
   // require an Alignment parameter.
-  void *allocatePrimary(AllocatorCache *Cache, uptr Size) {
-    return Cache->Allocate(&Primary, Primary.ClassID(Size));
+  void *allocatePrimary(AllocatorCache *Cache, uptr ClassId) {
+    return Cache->Allocate(&Primary, ClassId);
   }

   // Secondary allocations do not require a Cache, but do require an Alignment
@@ -41,17 +41,17 @@
     return Secondary.Allocate(&Stats, Size, Alignment);
   }

-  void deallocatePrimary(AllocatorCache *Cache, void *Ptr) {
-    Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
+  void deallocatePrimary(AllocatorCache *Cache, void *Ptr, uptr ClassId) {
+    Cache->Deallocate(&Primary, ClassId, Ptr);
   }

   void deallocateSecondary(void *Ptr) {
     Secondary.Deallocate(&Stats, Ptr);
   }

-  uptr getActuallyAllocatedSize(void *Ptr, bool FromPrimary) {
-    if (FromPrimary)
-      return PrimaryAllocator::ClassIdToSize(Primary.GetSizeClass(Ptr));
+  uptr getActuallyAllocatedSize(void *Ptr, uptr ClassId) {
+    if (ClassId)
+      return PrimaryAllocator::ClassIdToSize(ClassId);
     return Secondary.GetActuallyAllocatedSize(Ptr);
   }


@@ -25,7 +25,6 @@ namespace __scudo {
 struct ALIGNED(64) ScudoTSD {
   AllocatorCache Cache;
-  ScudoPrng Prng;
   uptr QuarantineCachePlaceHolder[4];

   void init(bool Shared);


@@ -31,62 +31,6 @@ INLINE Dest bit_cast(const Source& source) {
 void NORETURN dieWithMessage(const char *Format, ...);

 bool hasHardwareCRC32();

-INLINE u64 rotl(const u64 X, int K) {
-  return (X << K) | (X >> (64 - K));
-}
-
-// XoRoShiRo128+ PRNG (http://xoroshiro.di.unimi.it/).
-struct XoRoShiRo128Plus {
- public:
-  void init() {
-    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(State), sizeof(State),
-                            /*blocking=*/false))) {
-      // On some platforms, early processes like `init` do not have an
-      // initialized random pool (getrandom blocks and /dev/urandom doesn't
-      // exist yet), but we still have to provide them with some degree of
-      // entropy. Not having a secure seed is not as problematic for them, as
-      // they are less likely to be the target of heap based vulnerabilities
-      // exploitation attempts.
-      State[0] = NanoTime();
-      State[1] = 0;
-    }
-    fillCache();
-  }
-  u8 getU8() {
-    if (UNLIKELY(isCacheEmpty()))
-      fillCache();
-    const u8 Result = static_cast<u8>(CachedBytes & 0xff);
-    CachedBytes >>= 8;
-    CachedBytesAvailable--;
-    return Result;
-  }
-  u64 getU64() { return next(); }
-
- private:
-  u8 CachedBytesAvailable;
-  u64 CachedBytes;
-  u64 State[2];
-
-  u64 next() {
-    const u64 S0 = State[0];
-    u64 S1 = State[1];
-    const u64 Result = S0 + S1;
-    S1 ^= S0;
-    State[0] = rotl(S0, 55) ^ S1 ^ (S1 << 14);
-    State[1] = rotl(S1, 36);
-    return Result;
-  }
-  bool isCacheEmpty() {
-    return CachedBytesAvailable == 0;
-  }
-  void fillCache() {
-    CachedBytes = next();
-    CachedBytesAvailable = sizeof(CachedBytes);
-  }
-};
-
-typedef XoRoShiRo128Plus ScudoPrng;
-
 } // namespace __scudo

 #endif  // SCUDO_UTILS_H_