diff compiler-rt/lib/gwp_asan/common.cpp @ 150:1d019706d866
LLVM10
| author | anatofuz |
| --- | --- |
| date | Thu, 13 Feb 2020 15:10:13 +0900 |
| parents | |
| children | 0572611fdcc8 |
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/compiler-rt/lib/gwp_asan/common.cpp	Thu Feb 13 15:10:13 2020 +0900
@@ -0,0 +1,100 @@
+//===-- common.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/common.h"
+#include "gwp_asan/stack_trace_compressor.h"
+
+#include <assert.h>
+
+using AllocationMetadata = gwp_asan::AllocationMetadata;
+using Error = gwp_asan::Error;
+
+namespace gwp_asan {
+
+const char *ErrorToString(const Error &E) {
+  switch (E) {
+  case Error::UNKNOWN:
+    return "Unknown";
+  case Error::USE_AFTER_FREE:
+    return "Use After Free";
+  case Error::DOUBLE_FREE:
+    return "Double Free";
+  case Error::INVALID_FREE:
+    return "Invalid (Wild) Free";
+  case Error::BUFFER_OVERFLOW:
+    return "Buffer Overflow";
+  case Error::BUFFER_UNDERFLOW:
+    return "Buffer Underflow";
+  }
+  __builtin_trap();
+}
+
+void AllocationMetadata::RecordAllocation(uintptr_t AllocAddr,
+                                          size_t AllocSize) {
+  Addr = AllocAddr;
+  Size = AllocSize;
+  IsDeallocated = false;
+
+  AllocationTrace.ThreadID = getThreadID();
+  DeallocationTrace.TraceSize = 0;
+  DeallocationTrace.ThreadID = kInvalidThreadID;
+}
+
+void AllocationMetadata::RecordDeallocation() {
+  IsDeallocated = true;
+  DeallocationTrace.ThreadID = getThreadID();
+}
+
+void AllocationMetadata::CallSiteInfo::RecordBacktrace(
+    options::Backtrace_t Backtrace) {
+  TraceSize = 0;
+  if (!Backtrace)
+    return;
+
+  uintptr_t UncompressedBuffer[kMaxTraceLengthToCollect];
+  size_t BacktraceLength =
+      Backtrace(UncompressedBuffer, kMaxTraceLengthToCollect);
+  TraceSize =
+      compression::pack(UncompressedBuffer, BacktraceLength, CompressedTrace,
+                        AllocationMetadata::kStackFrameStorageBytes);
+}
+
+size_t AllocatorState::maximumAllocationSize() const { return PageSize; }
+
+uintptr_t AllocatorState::slotToAddr(size_t N) const {
+  return GuardedPagePool + (PageSize * (1 + N)) + (maximumAllocationSize() * N);
+}
+
+bool AllocatorState::isGuardPage(uintptr_t Ptr) const {
+  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
+  size_t PageOffsetFromPoolStart = (Ptr - GuardedPagePool) / PageSize;
+  size_t PagesPerSlot = maximumAllocationSize() / PageSize;
+  return (PageOffsetFromPoolStart % (PagesPerSlot + 1)) == 0;
+}
+
+static size_t addrToSlot(const AllocatorState *State, uintptr_t Ptr) {
+  size_t ByteOffsetFromPoolStart = Ptr - State->GuardedPagePool;
+  return ByteOffsetFromPoolStart /
+         (State->maximumAllocationSize() + State->PageSize);
+}
+
+size_t AllocatorState::getNearestSlot(uintptr_t Ptr) const {
+  if (Ptr <= GuardedPagePool + PageSize)
+    return 0;
+  if (Ptr > GuardedPagePoolEnd - PageSize)
+    return MaxSimultaneousAllocations - 1;
+
+  if (!isGuardPage(Ptr))
+    return addrToSlot(this, Ptr);
+
+  if (Ptr % PageSize <= PageSize / 2)
+    return addrToSlot(this, Ptr - PageSize); // Round down.
+  return addrToSlot(this, Ptr + PageSize);   // Round up.
+}
+
+} // namespace gwp_asan
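For reference, below is a small standalone sketch (not part of the diff) that mirrors the guard-page layout arithmetic this file adds: `slotToAddr`, `addrToSlot`, and `isGuardPage`. The pool base address, 4 KiB page size, and slot count are hypothetical values chosen only for illustration; since `maximumAllocationSize()` returns `PageSize`, each one-page slot is preceded by a one-page guard page.

```cpp
// Standalone sketch (assumptions only): mirrors the AllocatorState slot math
// from the diff, using made-up constants instead of the real pool mapping.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

namespace {
constexpr uintptr_t GuardedagePoolBase = 0x10000000;  // Hypothetical pool base.
constexpr size_t PageSize = 0x1000;                   // Assumed 4 KiB pages.
constexpr size_t MaxSimultaneousAllocations = 4;      // Hypothetical slot count.
constexpr size_t MaximumAllocationSize = PageSize;    // One page per slot.

// Same formula as AllocatorState::slotToAddr: one leading guard page, then
// N (guard page + slot) pairs before slot N begins.
uintptr_t slotToAddr(size_t N) {
  return GuardedagePoolBase + (PageSize * (1 + N)) + (MaximumAllocationSize * N);
}

// Same formula as the file-static addrToSlot helper: each slot occupies
// (slot size + guard page size) bytes of the pool.
size_t addrToSlot(uintptr_t Ptr) {
  return (Ptr - GuardedagePoolBase) / (MaximumAllocationSize + PageSize);
}

// Same test as AllocatorState::isGuardPage: every (PagesPerSlot + 1)-th page,
// counted from the pool base, is a guard page.
bool isGuardPage(uintptr_t Ptr) {
  size_t PageOffsetFromPoolStart = (Ptr - GuardedagePoolBase) / PageSize;
  size_t PagesPerSlot = MaximumAllocationSize / PageSize;
  return (PageOffsetFromPoolStart % (PagesPerSlot + 1)) == 0;
}
} // namespace

int main() {
  for (size_t N = 0; N < MaxSimultaneousAllocations; ++N) {
    uintptr_t Slot = slotToAddr(N);
    // The slot itself is not a guard page, but the page before it is.
    assert(!isGuardPage(Slot));
    assert(isGuardPage(Slot - PageSize));
    // Any address inside the slot maps back to the same slot index.
    assert(addrToSlot(Slot) == N);
    assert(addrToSlot(Slot + MaximumAllocationSize - 1) == N);
    printf("slot %zu -> [0x%zx, 0x%zx)\n", N, (size_t)Slot,
           (size_t)(Slot + MaximumAllocationSize));
  }
  return 0;
}
```

With these assumed constants, slot N starts at `base + PageSize * (2N + 1)`, which is why `getNearestSlot` in the diff rounds a fault address on a guard page toward the nearer adjacent slot: down if the address falls in the first half of the guard page, up otherwise.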